10#include "TargetInfo.h"
12#include "llvm/ADT/SmallBitVector.h"
// Returns true if this is a 64-bit integer vector type (the shape lowered as
// an MMX value): a vector whose total size is 64 bits, with integer elements
// narrower than 64 bits (so a plain <1 x i64> is excluded).
10#include "TargetInfo.h"
12#include "llvm/ADT/SmallBitVector.h"
20bool IsX86_MMXType(llvm::Type *IRType) {
22 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
23 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
24 IRType->getScalarSizeInBits() != 64;
// Fragment of an inline-asm type adjustment helper (presumably
// X86AdjustInlineAsmType — TODO confirm; interior lines are elided).
// For the AVX-512 mask constraint "k", the operand is retyped as a vector of
// i1 with one element per scalar bit of the original type.
30 if (Constraint ==
"k") {
32 return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
// Below: special-casing for non-half floating-point builtins; long double is
// checked against the x87 80-bit format.
43 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
44 if (BT->getKind() == BuiltinType::LongDouble) {
46 &llvm::APFloat::x87DoubleExtended())
// Legal SSE/AVX/AVX-512 vector widths in bits.
55 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
// vectorcall HVA/HFA aggregates may have at most 4 members to be passed in
// registers.
63static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
64 return NumMembers <= 4;
// Builds a direct ABIArgInfo for a homogeneous vector aggregate (HVA); the
// aggregate must not be flattened so it stays a single HVA unit.
68static ABIArgInfo getDirectX86Hva(llvm::Type*
T =
nullptr) {
71 AI.setCanBeFlattened(
false);
// Fragment of the per-call classification state (CCState): tracks which args
// were pre-assigned by a vectorcall first pass, the calling convention, and
// the remaining free integer and SSE registers.
82 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
85 llvm::SmallBitVector IsPreassigned;
86 unsigned CC = CallingConv::CC_C;
87 unsigned FreeRegs = 0;
88 unsigned FreeSSERegs = 0;
// X86_32ABIInfo: the 32-bit x86 (cdecl/fastcall/vectorcall/regcall) ABI
// implementation. Flags capture platform variants (Darwin vector ABI, small
// struct returns in registers, Win32 struct rules, soft-float, IAMCU, Linux).
// Many declarations are elided in this fragment.
94class X86_32ABIInfo :
public ABIInfo {
100 static const unsigned MinABIStackAlignInBytes = 4;
102 bool IsDarwinVectorABI;
103 bool IsRetSmallStructInRegABI;
104 bool IsWin32StructABI;
108 unsigned DefaultNumRegisterParameters;
// True for the integer widths that fit the register-return convention.
110 static bool isRegisterSize(
unsigned Size) {
111 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
116 return isX86VectorTypeForVectorCall(
getContext(), Ty);
120 uint64_t NumMembers)
const override {
122 return isX86VectorCallAggregateSmallEnough(NumMembers);
134 unsigned getTypeStackAlignInBytes(
QualType Ty,
unsigned Align)
const;
139 unsigned ArgIndex)
const;
143 bool updateFreeRegs(
QualType Ty, CCState &State)
const;
145 bool shouldAggregateUseDirect(
QualType Ty, CCState &State,
bool &InReg,
146 bool &NeedsPadding)
const;
147 bool shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const;
149 bool canExpandIndirectArgument(
QualType Ty)
const;
158 void runVectorCallFirstPass(
CGFunctionInfo &FI, CCState &State)
const;
// Constructor: IsMCUABI/IsLinuxABI are derived from the target triple; the
// rest come straight from the caller's platform flags.
167 bool RetSmallStructInRegABI,
bool Win32StructABI,
168 unsigned NumRegisterParameters,
bool SoftFloatABI)
169 :
ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
170 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
171 IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
172 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
173 IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
174 CGT.getTarget().getTriple().isOSCygMing()),
175 DefaultNumRegisterParameters(NumRegisterParameters) {}
// Fragment: Swift ABI hook followed by the X86_32TargetCodeGenInfo wrapper.
// The TargetCodeGenInfo owns the X86_32ABIInfo and forwards constructor flags
// to it; it also installs the Swift ABI info object.
184 bool AsReturnValue)
const override {
196 bool RetSmallStructInRegABI,
bool Win32StructABI,
197 unsigned NumRegisterParameters,
bool SoftFloatABI)
199 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
200 NumRegisterParameters, SoftFloatABI)) {
201 SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
204 static bool isStructReturnInRegABI(
217 llvm::Value *
Address)
const override;
// Inline-asm operand type adjustment is delegated to the shared x86 helper.
220 StringRef Constraint,
221 llvm::Type* Ty)
const override {
222 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
226 std::string &Constraints,
227 std::vector<llvm::Type *> &ResultRegTypes,
228 std::vector<llvm::Type *> &ResultTruncRegTypes,
229 std::vector<LValue> &ResultRegDests,
230 std::string &AsmString,
231 unsigned NumOutputs)
const override;
// Marker instruction recognized by the ObjC runtime for
// objc_retainAutoreleaseReturnValue optimization (a no-op mov).
234 return "movl\t%ebp, %ebp"
235 "\t\t// marker for objc_retainAutoreleaseReturnValue";
// Rewrites operand references ($0, $1, ...) inside an asm string after new
// output operands were prepended: every operand index >= FirstIn is shifted
// up by NumNewOuts. "$$" escapes (even runs of dollars) are left alone.
251 std::string &AsmString) {
253 llvm::raw_string_ostream OS(Buf);
255 while (Pos < AsmString.size()) {
256 size_t DollarStart = AsmString.find(
'$', Pos);
257 if (DollarStart == std::string::npos)
258 DollarStart = AsmString.size();
259 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
260 if (DollarEnd == std::string::npos)
261 DollarEnd = AsmString.size();
262 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
// An odd number of '$'s means the next token is a real operand reference,
// not an escaped dollar.
264 size_t NumDollars = DollarEnd - DollarStart;
265 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
267 size_t DigitStart = Pos;
268 if (AsmString[DigitStart] ==
'{') {
272 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
273 if (DigitEnd == std::string::npos)
274 DigitEnd = AsmString.size();
275 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
276 unsigned OperandIndex;
277 if (!OperandStr.getAsInteger(10, OperandIndex)) {
278 if (OperandIndex >= FirstIn)
279 OperandIndex += NumNewOuts;
287 AsmString = std::move(OS.str());
// Adds the return value of an MS-style inline asm block as an extra "={eax}"
// (or eax:edx) output: returns <= 32 bits use i32/eax, larger ones i64, and
// the value is truncated back to the exact return width via CoerceTy.
291void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
293 std::vector<llvm::Type *> &ResultRegTypes,
294 std::vector<llvm::Type *> &ResultTruncRegTypes,
295 std::vector<LValue> &ResultRegDests, std::string &AsmString,
296 unsigned NumOutputs)
const {
301 if (!Constraints.empty())
303 if (RetWidth <= 32) {
304 Constraints +=
"={eax}";
305 ResultRegTypes.push_back(CGF.
Int32Ty);
309 ResultRegTypes.push_back(CGF.
Int64Ty);
// Truncate from the full register down to the declared return width.
313 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.
getLLVMContext(), RetWidth);
314 ResultTruncRegTypes.push_back(CoerceTy);
318 ResultRegDests.push_back(ReturnSlot);
// Decides whether a return type fits the register-return convention.
// IAMCU allows anything up to 64 bits; otherwise the size must be a power-of-
// two register size. Arrays/records recurse over elements/fields.
325bool X86_32ABIInfo::shouldReturnTypeInRegister(
QualType Ty,
331 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
// 64/128-bit vectors are presumably returned in MMX/XMM registers — interior
// lines elided, confirm against full source.
337 if (Size == 64 || Size == 128)
352 return shouldReturnTypeInRegister(AT->getElementType(), Context);
356 if (!RT)
return false;
368 if (!shouldReturnTypeInRegister(FD->getType(), Context))
377 Ty = CTy->getElementType();
387 return Size == 32 || Size == 64;
392 for (
const auto *FD : RD->
fields()) {
402 if (FD->isBitField())
// True when an aggregate argument can be expanded into separate scalar args.
// Outside the Win32 struct ABI, only C-like, non-dynamic C++ records qualify;
// the record's flattened size must equal the type size (no padding/bitfields).
427bool X86_32ABIInfo::canExpandIndirectArgument(
QualType Ty)
const {
434 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
435 if (!IsWin32StructABI) {
438 if (!CXXRD->isCLike())
442 if (CXXRD->isDynamicClass())
453 return Size == getContext().getTypeSize(Ty);
// Indirect (sret) return. For conventions other than fastcall/vectorcall a
// free register may carry the sret pointer (InReg); otherwise a plain
// byval-less indirect return is used.
456ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(
QualType RetTy, CCState &State)
const {
459 if (State.CC != llvm::CallingConv::X86_FastCall &&
460 State.CC != llvm::CallingConv::X86_VectorCall && State.FreeRegs) {
463 return getNaturalAlignIndirectInReg(RetTy);
465 return getNaturalAlignIndirect(RetTy,
false);
// Fragment of X86_32ABIInfo's return-type classification (interior lines
// elided). Handles vectorcall/regcall homogeneous aggregates, Darwin vector
// returns, register-sized structs, and large _BitInt returns.
469 CCState &State)
const {
// vectorcall/regcall: homogeneous aggregates (HVAs) are classified specially.
475 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
476 State.CC == llvm::CallingConv::X86_RegCall) &&
477 isHomogeneousAggregate(RetTy,
Base, NumElts)) {
484 if (IsDarwinVectorABI) {
// 128-bit vectors are returned as two i64s on Darwin.
492 llvm::Type::getInt64Ty(getVMContext()), 2));
496 if ((Size == 8 || Size == 16 || Size == 32) ||
497 (Size == 64 && VT->getNumElements() == 1))
501 return getIndirectReturnResult(RetTy, State);
511 return getIndirectReturnResult(RetTy, State);
516 return getIndirectReturnResult(RetTy, State);
527 llvm::Type::getHalfTy(getVMContext()), 2));
532 if (shouldReturnTypeInRegister(RetTy, getContext())) {
// Single-element structs containing a float (non-Win32) or a pointer keep
// their element type rather than an integer coercion.
541 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
542 || SeltTy->hasPointerRepresentation())
550 return getIndirectReturnResult(RetTy, State);
555 RetTy = EnumTy->getDecl()->getIntegerType();
// _BitInt wider than 64 bits cannot be returned in registers.
558 if (EIT->getNumBits() > 64)
559 return getIndirectReturnResult(RetTy, State);
// Computes the stack alignment used when passing Ty. Alignments at or below
// the 4-byte ABI minimum are left alone; vector types with SSE/AVX alignments
// are honored; otherwise non-Darwin targets fall back to the 4-byte minimum.
565unsigned X86_32ABIInfo::getTypeStackAlignInBytes(
QualType Ty,
566 unsigned Align)
const {
569 if (Align <= MinABIStackAlignInBytes)
577 if (Ty->
isVectorType() && (Align == 16 || Align == 32 || Align == 64))
581 if (!IsDarwinVectorABI) {
583 return MinABIStackAlignInBytes;
591 return MinABIStackAlignInBytes;
// Fragment of X86_32ABIInfo::getIndirectResult (signature start elided).
// If a free register remains, the pointer itself may be passed in a register;
// otherwise a byval-style indirect is used, with realignment when the type's
// natural alignment exceeds its stack alignment.
595 CCState &State)
const {
597 if (State.FreeRegs) {
600 return getNaturalAlignIndirectInReg(Ty);
602 return getNaturalAlignIndirect(Ty,
false);
606 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
607 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
613 bool Realign = TypeAlign > StackAlign;
// Coarse classification used by the 32-bit ABI: float/double builtins get a
// dedicated class (presumably Class::Float — interior lines elided).
618X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualType Ty)
const {
625 if (K == BuiltinType::Float || K == BuiltinType::Double)
// Consumes the integer registers needed for Ty from State.FreeRegs.
// Returns false (without consuming) when the type needs more registers than
// remain, or more than two registers total.
631bool X86_32ABIInfo::updateFreeRegs(
QualType Ty, CCState &State)
const {
632 if (!IsSoftFloatABI) {
638 unsigned Size = getContext().getTypeSize(Ty);
// Round the bit size up to whole 32-bit registers.
639 unsigned SizeInRegs = (
Size + 31) / 32;
645 if (SizeInRegs > State.FreeRegs) {
654 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
658 State.FreeRegs -= SizeInRegs;
// Decides whether an aggregate should be passed directly in registers.
// Consumes registers via updateFreeRegs; for fastcall/vectorcall/regcall
// only aggregates of at most 32 bits qualify while registers remain.
662bool X86_32ABIInfo::shouldAggregateUseDirect(
QualType Ty, CCState &State,
664 bool &NeedsPadding)
const {
671 NeedsPadding =
false;
674 if (!updateFreeRegs(Ty, State))
680 if (State.CC == llvm::CallingConv::X86_FastCall ||
681 State.CC == llvm::CallingConv::X86_VectorCall ||
682 State.CC == llvm::CallingConv::X86_RegCall) {
683 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
// Decides whether a scalar should be passed in a register. fastcall and
// vectorcall only promote <=32-bit pointers/integers; regcall additionally
// rejects non-pointer/integer scalars after register accounting.
692bool X86_32ABIInfo::shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const {
693 bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
697 if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
698 State.CC == llvm::CallingConv::X86_VectorCall))
701 if (!updateFreeRegs(Ty, State))
704 if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
// vectorcall assigns SSE registers to homogeneous aggregates in a first pass
// over all arguments; arguments that got registers here are marked in
// State.IsPreassigned so the main loop skips them.
711void X86_32ABIInfo::runVectorCallFirstPass(
CGFunctionInfo &FI, CCState &State)
const {
722 for (
int I = 0,
E = Args.size(); I <
E; ++I) {
727 isHomogeneousAggregate(Ty,
Base, NumElts)) {
728 if (State.FreeSSERegs >= NumElts) {
729 State.FreeSSERegs -= NumElts;
731 State.IsPreassigned.set(I);
// Fragment of X86_32ABIInfo's argument classification (signature start and
// many interior lines elided). Orders: special conventions and delegate
// calls, HVAs, empty records, direct-in-register aggregates, Win32
// alignment rules, expansion, vectors, MMX, enums, promotable ints, _BitInt.
738 unsigned ArgIndex)
const {
740 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
741 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
742 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
745 TypeInfo TI = getContext().getTypeInfo(Ty);
752 return getIndirectResult(Ty,
false, State);
753 }
else if (State.IsDelegateCall) {
756 ABIArgInfo Res = getIndirectResult(Ty,
false, State);
// regcall/vectorcall homogeneous aggregates consume SSE registers; if none
// remain, vectorcall falls back to the HVA-direct form.
769 if ((IsRegCall || IsVectorCall) &&
770 isHomogeneousAggregate(Ty,
Base, NumElts)) {
771 if (State.FreeSSERegs >= NumElts) {
772 State.FreeSSERegs -= NumElts;
777 return getDirectX86Hva();
785 return getIndirectResult(Ty,
false, State);
792 return getIndirectResult(Ty,
true, State);
795 if (!IsWin32StructABI &&
isEmptyRecord(getContext(), Ty,
true))
798 llvm::LLVMContext &LLVMContext = getVMContext();
799 llvm::IntegerType *
Int32 = llvm::Type::getInt32Ty(LLVMContext);
800 bool NeedsPadding =
false;
// Pass the aggregate directly as a struct of i32s occupying SizeInRegs regs.
802 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
803 unsigned SizeInRegs = (TI.
Width + 31) / 32;
805 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
811 llvm::IntegerType *PaddingType = NeedsPadding ?
Int32 :
nullptr;
// Win32: over-aligned required arguments (>32-bit alignment) go indirect.
818 if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
819 unsigned AlignInBits = 0;
822 getContext().getASTRecordLayout(RT->
getDecl());
825 AlignInBits = TI.
Align;
827 if (AlignInBits > 32)
828 return getIndirectResult(Ty,
false, State);
// Small aggregates (<=16 bytes) that are expandable get split into scalars.
837 if (TI.
Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
838 canExpandIndirectArgument(Ty))
840 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
842 return getIndirectResult(Ty,
true, State);
849 if (IsWin32StructABI) {
850 if (TI.
Width <= 512 && State.FreeSSERegs > 0) {
854 return getIndirectResult(Ty,
false, State);
// Darwin passes small vectors coerced to an integer of the same width.
859 if (IsDarwinVectorABI) {
861 (TI.
Width == 64 && VT->getNumElements() == 1))
863 llvm::IntegerType::get(getVMContext(), TI.
Width));
866 if (IsX86_MMXType(CGT.ConvertType(Ty)))
874 Ty = EnumTy->getDecl()->getIntegerType();
876 bool InReg = shouldPrimitiveUseInReg(Ty, State);
878 if (isPromotableIntegerTypeForABI(Ty)) {
885 if (EIT->getNumBits() <= 64) {
890 return getIndirectResult(Ty,
false, State);
// Fragment of X86_32ABIInfo::computeInfo. Seeds the free register pools per
// calling convention (fastcall: 3 SSE; vectorcall: 6 SSE; regcall: 8 SSE;
// Win32: defaults + 3 SSE), runs the vectorcall first pass, classifies each
// non-preassigned argument, and rewrites to inalloca when needed.
902 else if (State.CC == llvm::CallingConv::X86_FastCall) {
904 State.FreeSSERegs = 3;
905 }
else if (State.CC == llvm::CallingConv::X86_VectorCall) {
907 State.FreeSSERegs = 6;
910 else if (State.CC == llvm::CallingConv::X86_RegCall) {
912 State.FreeSSERegs = 8;
913 }
else if (IsWin32StructABI) {
916 State.FreeRegs = DefaultNumRegisterParameters;
917 State.FreeSSERegs = 3;
919 State.FreeRegs = DefaultNumRegisterParameters;
926 if (State.FreeRegs) {
939 if (State.CC == llvm::CallingConv::X86_VectorCall)
940 runVectorCallFirstPass(FI, State);
942 bool UsedInAlloca =
false;
944 for (
unsigned I = 0,
E = Args.size(); I <
E; ++I) {
// Skip args the vectorcall first pass already assigned.
946 if (State.IsPreassigned.test(I))
957 rewriteWithInAlloca(FI);
// Fragment of X86_32ABIInfo::addFieldToArgStruct (start elided): appends one
// argument's slot to the inalloca frame struct. Indirect args occupy one
// pointer word; others their memory size. Trailing padding is materialized
// as an i8 array so the next field lands word-aligned.
966 assert(StackOffset.
isMultipleOf(WordSize) &&
"unaligned inalloca struct");
971 bool IsIndirect =
false;
975 llvm::Type *LLTy = CGT.ConvertTypeForMem(
Type);
977 LLTy = llvm::PointerType::getUnqual(getVMContext());
978 FrameFields.push_back(LLTy);
979 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(
Type);
983 StackOffset = FieldEnd.
alignTo(WordSize);
984 if (StackOffset != FieldEnd) {
985 CharUnits NumBytes = StackOffset - FieldEnd;
986 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
987 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
988 FrameFields.push_back(Ty);
// Unreachable enum-kind switch fallthrough (from a following helper —
// interior lines elided).
1010 llvm_unreachable(
"invalid enum");
// Rewrites all stack-passed arguments of FI into a single inalloca frame
// struct (Win32 only). The sret slot, the arguments, and padding are laid
// out in call order; the final struct becomes FI's ArgStruct.
1013void X86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo &FI)
const {
1014 assert(IsWin32StructABI &&
"inalloca only supported on win32");
// sret-after-this: the hidden return slot comes after the 'this' argument.
1029 if (
Ret.isIndirect() &&
Ret.isSRetAfterThis() && !IsThisCall &&
1031 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1036 if (
Ret.isIndirect() && !
Ret.getInReg()) {
1037 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.
getReturnType());
1039 Ret.setInAllocaSRet(IsWin32StructABI);
1047 for (; I !=
E; ++I) {
1049 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1052 FI.
setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
// Fragment (presumably X86_32ABIInfo::EmitVAArg — TODO confirm): looks up
// the type's size/alignment in chars and clamps the va_arg slot alignment via
// getTypeStackAlignInBytes.
1060 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1073 getTypeStackAlignInBytes(Ty,
TypeInfo.
Align.getQuantity()));
// Determines whether small structs are returned in registers on this triple.
// An explicit -fpcc-struct-return/-freg-struct-return option wins; Darwin and
// IAMCU always use registers; several BSDs and Win32 do as well.
1080bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1082 assert(Triple.getArch() == llvm::Triple::x86);
1084 switch (Opts.getStructReturnConvention()) {
1093 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1096 switch (Triple.getOS()) {
1097 case llvm::Triple::DragonFly:
1098 case llvm::Triple::FreeBSD:
1099 case llvm::Triple::OpenBSD:
1100 case llvm::Triple::Win32:
// Fragment of the interrupt-handler attribute helper (presumably
// addX86InterruptAttrs — TODO confirm). Functions with
// __attribute__((interrupt)) get the X86_INTR calling convention and a byval
// attribute on the interrupt-frame parameter.
1109 if (!FD->
hasAttr<AnyX86InterruptAttr>())
1112 llvm::Function *Fn = cast<llvm::Function>(GV);
1113 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1119 llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
1120 Fn->getContext(), ByValTy);
1121 Fn->addParamAttr(0, NewAttr);
// Applies x86-32 target attributes to a definition:
// __force_align_arg_pointer__ becomes the "stackrealign" function attribute.
1124void X86_32TargetCodeGenInfo::setTargetAttributes(
1126 if (GV->isDeclaration())
1128 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
D)) {
1129 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1130 llvm::Function *
Fn = cast<llvm::Function>(GV);
1131 Fn->addFnAttr(
"stackrealign");
// Fills in the DWARF EH register-size table for x86-32: most registers are
// 4 bytes, with 16- and 12-byte entries for the larger register classes
// (interior lines elided).
1138bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1143 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.
Int8Ty, 4);
1154 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.
Int8Ty, 16);
1160 Builder.CreateAlignedStore(
1161 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty,
Address, 9),
1167 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.
Int8Ty, 12);
// Largest native vector width (in bits) for the given AVX ABI level
// (return values for each case are elided in this fragment).
1182static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
1184 case X86AVXABILevel::AVX512:
1186 case X86AVXABILevel::AVX:
1188 case X86AVXABILevel::None:
1191 llvm_unreachable(
"Unknown AVXLevel");
// X86_64ABIInfo: the System V x86-64 ABI implementation. Classifies types
// into the SysV classes (INTEGER, SSE, X87, MEMORY, ...) and selects
// register/memory passing. Many declarations are elided in this fragment.
1195class X86_64ABIInfo :
public ABIInfo {
1216 static Class merge(Class Accum, Class Field);
1232 void postMerge(
unsigned AggregateSize, Class &Lo, Class &Hi)
const;
1260 void classify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1261 bool isNamedArg,
bool IsRegCall =
false)
const;
1263 llvm::Type *GetByteVectorType(
QualType Ty)
const;
1264 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1265 unsigned IROffset,
QualType SourceTy,
1266 unsigned SourceOffset)
const;
1267 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1268 unsigned IROffset,
QualType SourceTy,
1269 unsigned SourceOffset)
const;
1285 unsigned &neededInt,
unsigned &neededSSE,
1287 bool IsRegCall =
false)
const;
1290 unsigned &NeededSSE,
1291 unsigned &MaxVectorWidth)
const;
1294 unsigned &NeededSSE,
1295 unsigned &MaxVectorWidth)
const;
1297 bool IsIllegalVectorType(
QualType Ty)
const;
// ABI-compat knob: whether the pre-0.98 SysV revision behavior is honored.
1304 bool honorsRevision0_98()
const {
// Clang <= 3.8 kept the old MMX-as-X87 classification; newer clang treats
// 64-bit integer vectors as SSE except on Darwin/PS/FreeBSD.
1310 bool classifyIntegerMMXAsSSE()
const {
1312 if (
getContext().getLangOpts().getClangABICompat() <=
1313 LangOptions::ClangABI::Ver3_8)
1317 if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
// Clang >= 10 passes __int128 element vectors in memory on Linux/NetBSD.
1323 bool passInt128VectorsInMem()
const {
1325 if (
getContext().getLangOpts().getClangABICompat() <=
1326 LangOptions::ClangABI::Ver9)
1330 return T.isOSLinux() ||
T.isOSNetBSD();
1336 bool Has64BitPointers;
1340 :
ABIInfo(CGT), AVXLevel(AVXLevel),
1341 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}
1344 unsigned neededInt, neededSSE;
// Swift interop: aggregates containing vectors wider than 128 bits must be
// passed indirectly.
1350 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1351 return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
1363 bool has64BitPointers()
const {
1364 return Has64BitPointers;
// WinX86_64ABIInfo: the Microsoft x64 calling convention. Reuses the shared
// vectorcall helpers for HVA checks; IsMingw64 distinguishes the MinGW GNU
// environment. Most declarations are elided in this fragment.
1369class WinX86_64ABIInfo :
public ABIInfo {
1372 :
ABIInfo(CGT), AVXLevel(AVXLevel),
1373 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
1382 return isX86VectorTypeForVectorCall(
getContext(), Ty);
1386 uint64_t NumMembers)
const override {
1388 return isX86VectorCallAggregateSmallEnough(NumMembers);
1393 bool IsVectorCall,
bool IsRegCall)
const;
// Fragment of X86_64TargetCodeGenInfo (class header elided): installs the
// Swift ABI info, provides DWARF EH register defaults, inline-asm type
// adjustment, AVX argument detection, and stackrealign attribute handling.
1407 std::make_unique<SwiftABIInfo>(CGT,
true);
1419 llvm::Value *
Address)
const override {
// x86-64 DWARF registers default to 8 bytes.
1420 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.
Int8Ty, 8);
1429 StringRef Constraint,
1430 llvm::Type* Ty)
const override {
1431 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
// Scan the call's arguments for any that are passed using AVX registers.
1443 bool HasAVXType =
false;
1444 for (CallArgList::const_iterator
1445 it = args.begin(), ie = args.end(); it != ie; ++it) {
1446 if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {
1461 if (GV->isDeclaration())
1463 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
D)) {
1464 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1465 llvm::Function *
Fn = cast<llvm::Function>(GV);
1466 Fn->addFnAttr(
"stackrealign");
1476 QualType ReturnType)
const override;
// Lazily populates caller/callee feature maps, then diagnoses AVX ABI
// mismatches: if neither side has the feature it is a warning (both agree on
// the memory ABI); if only one side has it, the call is an error because the
// two sides would disagree on register vs. memory passing.
1481 llvm::StringMap<bool> &CallerMap,
1483 llvm::StringMap<bool> &CalleeMap,
1485 if (CalleeMap.empty() && CallerMap.empty()) {
1496 const llvm::StringMap<bool> &CallerMap,
1497 const llvm::StringMap<bool> &CalleeMap,
1500 bool CallerHasFeat = CallerMap.lookup(Feature);
1501 bool CalleeHasFeat = CalleeMap.lookup(Feature);
1502 if (!CallerHasFeat && !CalleeHasFeat)
1503 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
1504 << IsArgument << Ty << Feature;
1507 if (!CallerHasFeat || !CalleeHasFeat)
1508 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1509 << IsArgument << Ty << Feature;
// 512-bit variant: a side compiled with avx512f but without evex512 cannot
// use 512-bit registers, so such a mismatch is an immediate error; otherwise
// fall through to the generic avx512f feature check.
1518 const llvm::StringMap<bool> &CallerMap,
1519 const llvm::StringMap<bool> &CalleeMap,
1521 bool Caller256 = CallerMap.lookup(
"avx512f") && !CallerMap.lookup(
"evex512");
1522 bool Callee256 = CalleeMap.lookup(
"avx512f") && !CalleeMap.lookup(
"evex512");
1526 if (Caller256 || Callee256)
1527 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1528 << IsArgument << Ty <<
"evex512";
1531 "avx512f", IsArgument);
// Start of the generic checkAVXParam dispatcher (interior lines elided).
1536 const llvm::StringMap<bool> &CallerMap,
1537 const llvm::StringMap<bool> &CalleeMap,
QualType Ty,
// Verifies at each call site that vector arguments and the vector return
// type are ABI-compatible between caller and callee target features,
// diagnosing AVX/AVX-512 mismatches via the helpers above.
1551void X86_64TargetCodeGenInfo::checkFunctionCallABI(
CodeGenModule &CGM,
1560 llvm::StringMap<bool> CallerMap;
1561 llvm::StringMap<bool> CalleeMap;
1562 unsigned ArgIndex = 0;
1566 for (
const CallArg &Arg : Args) {
1574 if (Arg.getType()->isVectorType() &&
// Prefer the declared parameter type when the callee prototype is visible.
1580 if (ArgIndex < Callee->getNumParams())
1581 Ty =
Callee->getParamDecl(ArgIndex)->getType();
1584 CalleeMap, Ty,
true))
1592 if (
Callee->getReturnType()->isVectorType() &&
1596 CalleeMap,
Callee->getReturnType(),
// Fragment of the Windows library-name qualifier: wraps names containing
// spaces in quotes and appends ".lib" unless the name already ends in
// .lib/.a (case-insensitive).
1605 bool Quote = Lib.contains(
' ');
1606 std::string ArgStr = Quote ?
"\"" :
"";
1608 if (!Lib.ends_with_insensitive(
".lib") && !Lib.ends_with_insensitive(
".a"))
1610 ArgStr += Quote ?
"\"" :
"";
// Win32 (MSVC environment) variant of the 32-bit target info. Reuses the
// X86_32 implementation (soft-float off) and adds linker directive emission
// for dependent libraries and detect-mismatch records.
1615class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
1618 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
1619 unsigned NumRegisterParameters)
1620 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
1621 Win32StructABI, NumRegisterParameters,
false) {}
1623 void setTargetAttributes(
const Decl *
D, llvm::GlobalValue *GV,
// #pragma comment(lib, ...) -> /DEFAULTLIB linker directive.
1626 void getDependentLibraryOption(llvm::StringRef Lib,
1628 Opt =
"/DEFAULTLIB:";
1629 Opt += qualifyWindowsLibrary(Lib);
1632 void getDetectMismatchOption(llvm::StringRef Name,
1633 llvm::StringRef
Value,
1635 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" +
Value.str() +
"\"";
// Delegates to the base x86-32 attribute handling, then adds the Windows
// stack-probe attributes for definitions.
1640void WinX86_32TargetCodeGenInfo::setTargetAttributes(
1642 X86_32TargetCodeGenInfo::setTargetAttributes(
D, GV, CGM);
1643 if (GV->isDeclaration())
1645 addStackProbeTargetAttributes(
D, GV, CGM);
// Fragment of WinX86_64TargetCodeGenInfo (class header elided): Swift ABI
// info, 8-byte DWARF EH register defaults, and the same Windows linker
// directive helpers as the 32-bit variant.
1655 std::make_unique<SwiftABIInfo>(CGT,
true);
1658 void setTargetAttributes(
const Decl *
D, llvm::GlobalValue *GV,
1666 llvm::Value *
Address)
const override {
1667 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.
Int8Ty, 8);
1675 void getDependentLibraryOption(llvm::StringRef Lib,
1677 Opt =
"/DEFAULTLIB:";
1678 Opt += qualifyWindowsLibrary(Lib);
1681 void getDetectMismatchOption(llvm::StringRef Name,
1682 llvm::StringRef
Value,
1684 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" +
Value.str() +
"\"";
// Win64 target attributes: stackrealign for force_align_arg_pointer, plus
// stack-probe attributes for definitions.
1689void WinX86_64TargetCodeGenInfo::setTargetAttributes(
1692 if (GV->isDeclaration())
1694 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
D)) {
1695 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1696 llvm::Function *
Fn = cast<llvm::Function>(GV);
1697 Fn->addFnAttr(
"stackrealign");
1703 addStackProbeTargetAttributes(
D, GV, CGM);
// SysV post-merger cleanup of an aggregate's (Lo, Hi) classification:
// X87Up without a matching X87 (under rev 0.98 rules), >16-byte aggregates
// not forming an SSE/SSEUp run, and SSEUp without SSE are demoted (demotion
// targets elided in this fragment).
1706void X86_64ABIInfo::postMerge(
unsigned AggregateSize,
Class &Lo,
1731 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1733 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1735 if (Hi == SSEUp && Lo != SSE)
// SysV class merge for two fields sharing an eightbyte: equal/NoClass keeps
// the other, Memory dominates, and any x87-family class forces Memory
// (remaining rules elided in this fragment).
1739X86_64ABIInfo::Class X86_64ABIInfo::merge(
Class Accum,
Class Field) {
1763 assert((Accum != Memory && Accum != ComplexX87) &&
1764 "Invalid accumulated classification during merge.");
1765 if (Accum == Field || Field == NoClass)
1767 if (Field == Memory)
1769 if (Accum == NoClass)
1773 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1774 Accum == X87 || Accum == X87Up)
// The SysV x86-64 classification algorithm: assigns (Lo, Hi) eightbyte
// classes for Ty at bit offset OffsetBase. Handles builtins, enums, member
// pointers, vectors, complex, _BitInt, arrays, and records; many interior
// lines are elided in this fragment.
1779void X86_64ABIInfo::classify(
QualType Ty, uint64_t OffsetBase,
Class &Lo,
1780 Class &Hi,
bool isNamedArg,
bool IsRegCall)
const {
// Which half of the 16-byte window this offset falls in.
1791 Class &Current = OffsetBase < 64 ? Lo : Hi;
// Builtin scalar types.
1797 if (k == BuiltinType::Void) {
1799 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1802 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1804 }
else if (k == BuiltinType::Float || k == BuiltinType::Double ||
1805 k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
1807 }
else if (k == BuiltinType::Float128) {
1810 }
else if (k == BuiltinType::LongDouble) {
// long double's class depends on the target's format: IEEE quad, x87
// 80-bit, or plain double.
1811 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1812 if (LDF == &llvm::APFloat::IEEEquad()) {
1815 }
else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
1818 }
else if (LDF == &llvm::APFloat::IEEEdouble()) {
1821 llvm_unreachable(
"unexpected long double representation!");
// Enums classify as their underlying integer type.
1830 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
// Member pointers: a 64-bit member function pointer spanning an eightbyte
// boundary needs both halves.
1841 if (Has64BitPointers) {
1848 uint64_t EB_FuncPtr = (OffsetBase) / 64;
1849 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
1850 if (EB_FuncPtr != EB_ThisAdj) {
// Vector types, by total size.
1864 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
1873 uint64_t EB_Lo = (OffsetBase) / 64;
1877 }
else if (Size == 64) {
1878 QualType ElementType = VT->getElementType();
// Old clang classified 64-bit integer vectors (MMX) differently.
1887 if (!classifyIntegerMMXAsSSE() &&
1898 if (OffsetBase && OffsetBase != 64)
1900 }
else if (Size == 128 ||
1901 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel)) {
1902 QualType ElementType = VT->getElementType();
1905 if (passInt128VectorsInMem() &&
Size != 128 &&
// Complex types.
1937 else if (Size <= 128)
1939 }
else if (ET->
isFloat16Type() || ET == getContext().FloatTy ||
1942 }
else if (ET == getContext().DoubleTy) {
1944 }
else if (ET == getContext().LongDoubleTy) {
1945 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1946 if (LDF == &llvm::APFloat::IEEEquad())
1948 else if (LDF == &llvm::APFloat::x87DoubleExtended())
1949 Current = ComplexX87;
1950 else if (LDF == &llvm::APFloat::IEEEdouble())
1953 llvm_unreachable(
"unexpected long double representation!");
// A complex whose real and imaginary parts straddle an eightbyte boundary
// occupies both halves.
1958 uint64_t EB_Real = (OffsetBase) / 64;
1959 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1960 if (Hi == NoClass && EB_Real != EB_Imag)
// _BitInt by width.
1967 if (EITy->getNumBits() <= 64)
1969 else if (EITy->getNumBits() <= 128)
// Arrays: > 512 bits go to memory (except regcall); misaligned or
// oversized elements also force memory; otherwise merge element classes.
1984 if (!IsRegCall && Size > 512)
1991 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1997 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1998 uint64_t ArraySize = AT->getZExtSize();
2005 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2008 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2009 Class FieldLo, FieldHi;
2010 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2011 Lo = merge(Lo, FieldLo);
2012 Hi = merge(Hi, FieldHi);
2013 if (Lo == Memory || Hi == Memory)
2017 postMerge(Size, Lo, Hi);
2018 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
// Records: merge base classes first, then fields.
2048 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2049 for (
const auto &I : CXXRD->bases()) {
2050 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2051 "Unexpected base class!");
2060 Class FieldLo, FieldHi;
2063 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2064 Lo = merge(Lo, FieldLo);
2065 Hi = merge(Hi, FieldHi);
2066 if (Lo == Memory || Hi == Memory) {
2067 postMerge(Size, Lo, Hi);
// Clang <= 11 (and PS targets) kept an older union-field rule.
2075 bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
2077 getContext().getTargetInfo().getTriple().isPS();
2078 bool IsUnion = RT->
isUnionType() && !UseClang11Compat;
2081 i != e; ++i, ++idx) {
2083 bool BitField = i->isBitField();
2086 if (BitField && i->isUnnamedBitField())
2099 ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
2100 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2102 postMerge(Size, Lo, Hi);
// Misaligned (non-bitfield) fields force the whole record to memory.
2107 Offset % getContext().getTypeAlign(i->getType().getCanonicalType());
2109 if (!BitField && IsInMemory) {
2111 postMerge(Size, Lo, Hi);
2121 Class FieldLo, FieldHi;
2127 assert(!i->isUnnamedBitField());
2135 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2140 FieldHi = EB_Hi ?
Integer : NoClass;
2143 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2144 Lo = merge(Lo, FieldLo);
2145 Hi = merge(Hi, FieldHi);
2146 if (Lo == Memory || Hi == Memory)
2150 postMerge(Size, Lo, Hi);
// Fragment (presumably X86_64ABIInfo::getIndirectReturnResult — TODO
// confirm): enums decay to their underlying integer; otherwise the value is
// returned indirectly at natural alignment.
2160 Ty = EnumTy->getDecl()->getIntegerType();
2163 return getNaturalAlignIndirect(Ty);
2169 return getNaturalAlignIndirect(Ty);
// A vector type is "illegal" (must be passed in memory) when its size falls
// outside (64, LargestVector] for the current AVX level, or when it is an
// __int128-element vector on targets that pass those in memory.
2172bool X86_64ABIInfo::IsIllegalVectorType(
QualType Ty)
const {
2175 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2176 if (Size <= 64 || Size > LargestVector)
2178 QualType EltTy = VecTy->getElementType();
2179 if (passInt128VectorsInMem() &&
// Fragment of X86_64ABIInfo::getIndirectResult (start elided). Alignment is
// at least 8 bytes; with no free integer registers, naturally-aligned small
// types can still be passed directly on the stack.
2189 unsigned freeIntRegs)
const {
2202 Ty = EnumTy->getDecl()->getIntegerType();
2213 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2236 if (freeIntRegs == 0) {
2241 if (Align == 8 && Size <= 64)
// Returns the IR vector type used to pass Ty in SSE/AVX registers. Existing
// vector types are kept, except __int128-element vectors (rewritten as i64
// vectors when they must go to memory); fp128 stays; everything else becomes
// a vector of doubles sized to the type (128/256/512 bits).
2251llvm::Type *X86_64ABIInfo::GetByteVectorType(
QualType Ty)
const {
2257 llvm::Type *IRType = CGT.ConvertType(Ty);
2258 if (isa<llvm::VectorType>(IRType)) {
2261 if (passInt128VectorsInMem() &&
2262 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
2265 return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
2272 if (IRType->getTypeID() == llvm::Type::FP128TyID)
2277 assert((Size == 128 || Size == 256 || Size == 512) &&
"Invalid type found!");
2281 return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
// Fragment of BitsContainNoUserData: true if the bit range [StartBit,
// EndBit) of Ty holds only padding. Arrays, base classes, and fields are
// each checked recursively against their slice of the range.
2298 if (TySize <= StartBit)
2303 unsigned NumElts = (
unsigned)AT->getZExtSize();
2306 for (
unsigned i = 0; i != NumElts; ++i) {
2308 unsigned EltOffset = i*EltSize;
2309 if (EltOffset >= EndBit)
break;
2311 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2313 EndBit-EltOffset, Context))
2325 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2326 for (
const auto &I : CXXRD->bases()) {
2327 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2328 "Unexpected base class!");
2334 if (BaseOffset >= EndBit)
continue;
2336 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2338 EndBit-BaseOffset, Context))
2349 i != e; ++i, ++idx) {
2353 if (FieldOffset >= EndBit)
break;
2355 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
// Fragment of a helper (presumably getFPTypeAtOffset — TODO confirm) that
// finds the floating-point IR type located at IROffset inside IRType,
// drilling into struct elements and array elements by layout offset.
2371 const llvm::DataLayout &TD) {
2372 if (IROffset == 0 && IRType->isFloatingPointTy())
2376 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2377 if (!STy->getNumContainedTypes())
2380 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2381 unsigned Elt = SL->getElementContainingOffset(IROffset);
2382 IROffset -= SL->getElementOffset(Elt);
2387 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2388 llvm::Type *EltTy = ATy->getElementType();
2389 unsigned EltSize = TD.getTypeAllocSize(EltTy);
// Reduce IROffset to the offset within the containing element.
2390 IROffset -= IROffset / EltSize * EltSize;
// Picks the IR type for an SSE-classified eightbyte at IROffset: double when
// nothing smaller is found, <2 x float> for a float pair, and half/bfloat
// combinations packed as 2- or 4-element half vectors.
2399llvm::Type *X86_64ABIInfo::
2400GetSSETypeAtOffset(llvm::Type *IRType,
unsigned IROffset,
2401 QualType SourceTy,
unsigned SourceOffset)
const {
2402 const llvm::DataLayout &TD = getDataLayout();
2403 unsigned SourceSize =
2404 (
unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
2406 if (!T0 || T0->isDoubleTy())
2407 return llvm::Type::getDoubleTy(getVMContext());
// Look for a second FP element sharing the eightbyte.
2410 llvm::Type *T1 =
nullptr;
2411 unsigned T0Size = TD.getTypeAllocSize(T0);
2412 if (SourceSize > T0Size)
2414 if (T1 ==
nullptr) {
// A lone 16-bit FP value followed by >2 more bytes still needs a wider slot.
2417 if (T0->is16bitFPTy() && SourceSize > 4)
2426 if (T0->isFloatTy() && T1->isFloatTy())
2427 return llvm::FixedVectorType::get(T0, 2);
2429 if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
2430 llvm::Type *T2 =
nullptr;
2434 return llvm::FixedVectorType::get(T0, 2);
2435 return llvm::FixedVectorType::get(T0, 4);
2438 if (T0->is16bitFPTy() || T1->is16bitFPTy())
2439 return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);
2441 return llvm::Type::getDoubleTy(getVMContext());
// Picks the IR type for an INTEGER-classified eightbyte at IROffset.
// Pointer-or-i64 at offset 0 is used as-is; smaller integers are kept when
// the rest of the eightbyte is padding; structs/arrays recurse into the
// element containing the offset; otherwise an integer covering the remaining
// source bytes (max 8) is synthesized.
2459llvm::Type *X86_64ABIInfo::
2460GetINTEGERTypeAtOffset(llvm::Type *IRType,
unsigned IROffset,
2461 QualType SourceTy,
unsigned SourceOffset)
const {
2464 if (IROffset == 0) {
2466 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2467 IRType->isIntegerTy(64))
2476 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2477 IRType->isIntegerTy(32) ||
2478 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2479 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2480 cast<llvm::IntegerType>(IRType)->getBitWidth();
// Keep the narrow type only if the rest of the eightbyte is padding.
2483 SourceOffset*8+64, getContext()))
2488 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2490 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2491 if (IROffset < SL->getSizeInBytes()) {
2492 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2493 IROffset -= SL->getElementOffset(FieldIdx);
2495 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2496 SourceTy, SourceOffset);
2500 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2501 llvm::Type *EltTy = ATy->getElementType();
2502 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2503 unsigned EltOffset = IROffset/EltSize*EltSize;
2504 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2510 unsigned TySizeInBytes =
2511 (
unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2513 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
2517 return llvm::IntegerType::get(getVMContext(),
2518 std::min(TySizeInBytes-SourceOffset, 8U)*8);
// Fragment of GetX86_64ByValArgumentPair: combines the Lo and Hi eightbyte
// types into {Lo, Hi} with Hi at offset exactly 8. If Hi's natural alignment
// would start it past 8, Lo is widened (half/float -> double, otherwise i64)
// so the layout constraint holds; the assert verifies it.
2529 const llvm::DataLayout &TD) {
2534 unsigned LoSize = (
unsigned)TD.getTypeAllocSize(Lo);
2535 llvm::Align HiAlign = TD.getABITypeAlign(Hi);
2536 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
2537 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
2549 if (Lo->isHalfTy() || Lo->isFloatTy())
2550 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2552 assert((Lo->isIntegerTy() || Lo->isPointerTy())
2553 &&
"Invalid/unknown lo type");
2554 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2558 llvm::StructType *
Result = llvm::StructType::get(Lo, Hi);
2561 assert(TD.getStructLayout(
Result)->getElementOffset(1) == 8 &&
2562 "Invalid x86-64 argument pair!");
// X86_64 return classification: run classify(), then map the (Lo, Hi) pair
// onto IR types — INTEGER via GetINTEGERTypeAtOffset (with integer promotion
// for small scalars), SSE via GetSSETypeAtOffset, X87 as x86_fp80,
// ComplexX87 as {x86_fp80, x86_fp80}; the high part mirrors this for the
// second eightbyte, and SSEUp extends Lo into a full byte-vector.
2567classifyReturnType(
QualType RetTy)
const {
2570 X86_64ABIInfo::Class Lo, Hi;
2571 classify(RetTy, 0, Lo, Hi,
true);
2574 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
2575 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
2577 llvm::Type *ResType =
nullptr;
2584 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
2585 "Unknown missing lo part");
2590 llvm_unreachable(
"Invalid classification for lo word.");
// Memory class: return indirectly (sret).
2595 return getIndirectReturnResult(RetTy);
2600 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
// Small integer scalars are extended rather than coerced.
2604 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2607 RetTy = EnumTy->getDecl()->getIntegerType();
2610 isPromotableIntegerTypeForABI(RetTy))
2618 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2624 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2631 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
2632 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2633 llvm::Type::getX86_FP80Ty(getVMContext()));
2637 llvm::Type *HighPart =
nullptr;
2643 llvm_unreachable(
"Invalid classification for hi word.");
2650 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2655 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2666 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
2667 ResType = GetByteVectorType(RetTy);
2678 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2695X86_64ABIInfo::classifyArgumentType(
QualType Ty,
unsigned freeIntRegs,
2696 unsigned &neededInt,
unsigned &neededSSE,
2697 bool isNamedArg,
bool IsRegCall)
const {
2700 X86_64ABIInfo::Class Lo, Hi;
2701 classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
2705 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
2706 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
2710 llvm::Type *ResType =
nullptr;
2717 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
2718 "Unknown missing lo part");
2731 return getIndirectResult(Ty, freeIntRegs);
2735 llvm_unreachable(
"Invalid classification for lo word.");
2744 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2748 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2751 Ty = EnumTy->getDecl()->getIntegerType();
2754 isPromotableIntegerTypeForABI(Ty))
2764 llvm::Type *IRType = CGT.ConvertType(Ty);
2765 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2771 llvm::Type *HighPart =
nullptr;
2779 llvm_unreachable(
"Invalid classification for hi word.");
2781 case NoClass:
break;
2786 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2797 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2807 assert(Lo == SSE &&
"Unexpected SSEUp classification");
2808 ResType = GetByteVectorType(Ty);
2822X86_64ABIInfo::classifyRegCallStructTypeImpl(
QualType Ty,
unsigned &NeededInt,
2823 unsigned &NeededSSE,
2824 unsigned &MaxVectorWidth)
const {
2826 assert(RT &&
"classifyRegCallStructType only valid with struct types");
2829 return getIndirectReturnResult(Ty);
2832 if (
auto CXXRD = dyn_cast<CXXRecordDecl>(RT->
getDecl())) {
2833 if (CXXRD->isDynamicClass()) {
2834 NeededInt = NeededSSE = 0;
2835 return getIndirectReturnResult(Ty);
2838 for (
const auto &I : CXXRD->bases())
2839 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
2842 NeededInt = NeededSSE = 0;
2843 return getIndirectReturnResult(Ty);
2851 if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
2854 NeededInt = NeededSSE = 0;
2855 return getIndirectReturnResult(Ty);
2858 unsigned LocalNeededInt, LocalNeededSSE;
2862 NeededInt = NeededSSE = 0;
2863 return getIndirectReturnResult(Ty);
2865 if (
const auto *AT = getContext().getAsConstantArrayType(MTy))
2866 MTy = AT->getElementType();
2868 if (getContext().getTypeSize(VT) > MaxVectorWidth)
2869 MaxVectorWidth = getContext().getTypeSize(VT);
2870 NeededInt += LocalNeededInt;
2871 NeededSSE += LocalNeededSSE;
2879X86_64ABIInfo::classifyRegCallStructType(
QualType Ty,
unsigned &NeededInt,
2880 unsigned &NeededSSE,
2881 unsigned &MaxVectorWidth)
const {
2887 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
2898 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
2899 Win64ABIInfo.computeInfo(FI);
2903 bool IsRegCall =
CallingConv == llvm::CallingConv::X86_RegCall;
2906 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
2907 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
2908 unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
2915 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
2916 FreeIntRegs -= NeededInt;
2917 FreeSSERegs -= NeededSSE;
2925 getContext().LongDoubleTy)
2937 else if (NeededSSE && MaxVectorWidth > 0)
2949 it != ie; ++it, ++ArgNo) {
2950 bool IsNamedArg = ArgNo < NumRequiredArgs;
2952 if (IsRegCall && it->type->isStructureOrClassType())
2953 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
2957 NeededSSE, IsNamedArg);
2963 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
2964 FreeIntRegs -= NeededInt;
2965 FreeSSERegs -= NeededSSE;
2969 it->info = getIndirectResult(it->type, FreeIntRegs);
2978 llvm::Value *overflow_arg_area =
2993 llvm::Value *Res = overflow_arg_area;
3001 llvm::Value *Offset =
3002 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3004 Offset,
"overflow_arg_area.next");
3008 return Address(Res, LTy, Align);
3020 unsigned neededInt, neededSSE;
3032 if (!neededInt && !neededSSE)
3048 llvm::Value *InRegs =
nullptr;
3050 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3054 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3055 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3061 llvm::Value *FitsInFP =
3062 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3063 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3064 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3070 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3091 if (neededInt && neededSSE) {
3093 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3097 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3098 llvm::Type *TyLo = ST->getElementType(0);
3099 llvm::Type *TyHi = ST->getElementType(1);
3100 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3101 "Unexpected ABI info for mixed regs");
3102 llvm::Value *GPAddr =
3104 llvm::Value *FPAddr =
3106 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3107 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3123 }
else if (neededInt) {
3128 auto TInfo = getContext().getTypeInfoInChars(Ty);
3129 uint64_t TySize = TInfo.Width.getQuantity();
3140 }
else if (neededSSE == 1) {
3144 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3177 llvm::Value *Offset = llvm::ConstantInt::get(CGF.
Int32Ty, neededInt * 8);
3182 llvm::Value *Offset = llvm::ConstantInt::get(CGF.
Int32Ty, neededSSE * 16);
3205 uint64_t Width = getContext().getTypeSize(Ty);
3206 bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3214ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
3220 isHomogeneousAggregate(Ty,
Base, NumElts) && FreeSSERegs >= NumElts) {
3221 FreeSSERegs -= NumElts;
3222 return getDirectX86Hva();
3228 bool IsReturnType,
bool IsVectorCall,
3229 bool IsRegCall)
const {
3235 Ty = EnumTy->getDecl()->getIntegerType();
3237 TypeInfo Info = getContext().getTypeInfo(Ty);
3243 if (!IsReturnType) {
3249 return getNaturalAlignIndirect(Ty,
false);
3257 if ((IsVectorCall || IsRegCall) &&
3258 isHomogeneousAggregate(Ty,
Base, NumElts)) {
3260 if (FreeSSERegs >= NumElts) {
3261 FreeSSERegs -= NumElts;
3267 }
else if (IsVectorCall) {
3268 if (FreeSSERegs >= NumElts &&
3270 FreeSSERegs -= NumElts;
3272 }
else if (IsReturnType) {
3284 llvm::Type *LLTy = CGT.ConvertType(Ty);
3285 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3292 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3293 return getNaturalAlignIndirect(Ty,
false);
3300 switch (BT->getKind()) {
3301 case BuiltinType::Bool:
3306 case BuiltinType::LongDouble:
3310 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3311 if (LDF == &llvm::APFloat::x87DoubleExtended())
3316 case BuiltinType::Int128:
3317 case BuiltinType::UInt128:
3327 llvm::Type::getInt64Ty(getVMContext()), 2));
3350 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
3351 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
3355 if (CC == llvm::CallingConv::X86_64_SysV) {
3356 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
3357 SysVABIInfo.computeInfo(FI);
3361 unsigned FreeSSERegs = 0;
3365 }
else if (IsRegCall) {
3372 IsVectorCall, IsRegCall);
3377 }
else if (IsRegCall) {
3382 unsigned ArgNum = 0;
3383 unsigned ZeroSSERegs = 0;
3388 unsigned *MaybeFreeSSERegs =
3389 (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
3391 classify(I.
type, *MaybeFreeSSERegs,
false, IsVectorCall, IsRegCall);
3399 I.
info = reclassifyHvaArgForVectorCall(I.
type, FreeSSERegs, I.
info);
3407 uint64_t Width = getContext().getTypeSize(Ty);
3408 bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3417 CodeGenModule &CGM,
bool DarwinVectorABI,
bool Win32StructABI,
3418 unsigned NumRegisterParameters,
bool SoftFloatABI) {
3419 bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3421 return std::make_unique<X86_32TargetCodeGenInfo>(
3422 CGM.
getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3423 NumRegisterParameters, SoftFloatABI);
3427 CodeGenModule &CGM,
bool DarwinVectorABI,
bool Win32StructABI,
3428 unsigned NumRegisterParameters) {
3429 bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3431 return std::make_unique<WinX86_32TargetCodeGenInfo>(
3432 CGM.
getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3433 NumRegisterParameters);
3436std::unique_ptr<TargetCodeGenInfo>
3439 return std::make_unique<X86_64TargetCodeGenInfo>(CGM.
getTypes(), AVXLevel);
3442std::unique_ptr<TargetCodeGenInfo>
3445 return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.
getTypes(), AVXLevel);
static bool checkAVX512ParamFeature(DiagnosticsEngine &Diag, SourceLocation CallLoc, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, bool IsArgument)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static void initFeatureMaps(const ASTContext &Ctx, llvm::StringMap< bool > &CallerMap, const FunctionDecl *Caller, llvm::StringMap< bool > &CalleeMap, const FunctionDecl *Callee)
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, SourceLocation CallLoc, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, bool IsArgument)
static bool checkAVXParamFeature(DiagnosticsEngine &Diag, SourceLocation CallLoc, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, StringRef Feature, bool IsArgument)
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
static llvm::Type * getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
getFPTypeAtOffset - Return a floating point type at the specified offset.
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM)
static bool isArgInAlloca(const ABIArgInfo &Info)
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
TypeInfoChars getTypeInfoInChars(const Type *T) const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
const TargetInfo & getTargetInfo() const
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getRequiredAlignment() const
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
A fixed int type of a specified bitwidth.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
Represents a C++ struct/union/class.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits One()
One - Construct a CharUnits quantity of one.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
bool getIndirectByVal() const
static ABIArgInfo getInAlloca(unsigned FieldIndex, bool Indirect=false)
static ABIArgInfo getIgnore()
static ABIArgInfo getExpand()
void setIndirectAlign(CharUnits IA)
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
bool canHaveCoerceToType() const
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
ASTContext & getContext() const
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
virtual RValue EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const
Emit the target dependent code to load a value of.
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
const TargetInfo & getTarget() const
virtual RValue EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const =0
EmitVAArg - Emit the target dependent code to load a value of.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
RecordArgABI
Specify how one should pass an argument of a record type.
@ RAA_Indirect
Pass it as a pointer to temporary memory.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
CGFunctionInfo - Class to encapsulate the information about a function definition.
ABIArgInfo & getReturnInfo()
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
const_arg_iterator arg_begin() const
unsigned getRegParm() const
CanQualType getReturnType() const
bool getHasRegParm() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
unsigned getMaxVectorWidth() const
Return the maximum vector width in the arguments.
unsigned getNumRequiredArgs() const
void setMaxVectorWidth(unsigned Width)
Set the maximum vector width in the arguments.
CallArgList - Type for representing both the value and type of arguments in a call.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
llvm::Type * ConvertTypeForMem(QualType T)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
ASTContext & getContext() const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CGFunctionInfo * CurFnInfo
llvm::LLVMContext & getLLVMContext()
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
This class organizes the cross-function state that is used while generating LLVM code.
DiagnosticsEngine & getDiags() const
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::Triple & getTriple() const
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue references.
Address getAddress() const
void setAddress(Address address)
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
A class for recording the number of arguments that a function signature requires.
Target specific hooks for defining how a type should be passed or returned from functions with one of...
bool occupiesMoreThan(ArrayRef< llvm::Type * > scalarTypes, unsigned maxAllRegisters) const
Does the given lowering require more than the given number of registers when expanded?
virtual bool shouldPassIndirectly(ArrayRef< llvm::Type * > ComponentTys, bool AsReturnValue) const
Returns true if an aggregate which expands to the given type sequence should be passed / returned ind...
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type * > &ResultRegTypes, std::vector< llvm::Type * > &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
virtual llvm::Type * adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, StringRef Constraint, llvm::Type *Ty) const
Corrects the low-level LLVM type for a given constraint and "usual" type.
virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const
Retrieve the address of a function to call immediately before calling objc_retainAutoreleasedReturnVa...
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args, QualType ReturnType) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Initializes the given DWARF EH register-size table, a char*.
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
static std::string qualifyWindowsLibrary(StringRef Lib)
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
virtual bool markARCOptimizedReturnCallsAsNoTail() const
Determine whether a call to objc_retainAutoreleasedReturnValue or objc_unsafeClaimAutoreleasedReturnV...
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
QualType getElementType() const
Represents the canonical version of C arrays with a specified constant size.
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Decl - This represents one declaration (or definition), e.g.
Concrete class used by the front-end to report problems and issues.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Represents a function declaration or definition.
const ParmVarDecl * getParamDecl(unsigned i) const
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
CallingConv getCallConv() const
@ Ver11
Attempt to be ABI-compatible with code generated by Clang 11.0.x (git 2e10b7a39b93).
A (possibly-)qualified type.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
QualType getCanonicalType() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getDecl() const
Encodes a location in the source.
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
const llvm::fltSemantics & getLongDoubleFormat() const
The base class of the type hierarchy.
bool isBlockPointerType() const
bool isFloat16Type() const
bool isPointerType() const
bool isReferenceType() const
bool isEnumeralType() const
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
bool isBitIntType() const
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
bool isBuiltinType() const
Helper methods to distinguish type categories.
bool isAnyComplexType() const
bool isMemberPointerType() const
bool isBFloat16Type() const
bool isMemberFunctionPointerType() const
bool isVectorType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool isRecordType() const
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
Represents a GCC generic vector type.
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
std::unique_ptr< TargetCodeGenInfo > createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
std::unique_ptr< TargetCodeGenInfo > createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters)
bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty)
RValue emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, AggValueSlot Slot, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
X86AVXABILevel
The AVX ABI level for X86 targets.
bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyField - Return true iff a the field is "empty", that is it is an unnamed bit-field or an (arra...
llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
bool isAggregateTypeForABI(QualType T)
const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters, bool SoftFloatABI)
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
std::unique_ptr< TargetCodeGenInfo > createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool isSIMDVectorType(ASTContext &Context, QualType Ty)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Ret(InterpState &S, CodePtr &PC, APValue &Result)
The JSON file list parser is used to communicate input to InstallAPI.
@ Result
The result type of a method or function.
const FunctionProtoType * T
CallingConv
CallingConv - Specifies the calling convention that a function uses.
@ Class
The "class" keyword introduces the elaborated-type-specifier.
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * Int32Ty