10#include "TargetInfo.h"
12#include "llvm/ADT/SmallBitVector.h"
bool IsX86_MMXType(llvm::Type *IRType) {
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;
}
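// Note, derived from the checks above: this accepts exactly the 64-bit
// integer vectors that map to an MMX register -- e.g. <8 x i8>, <4 x i16>,
// <2 x i32> -- while the final getScalarSizeInBits() != 64 test excludes
// <1 x i64>, which is handled separately.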
  if (Constraint == "k") {
    return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
  }
  if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
    if (BT->getKind() == BuiltinType::LongDouble) {
      // x87 extended-precision long double never qualifies for SSE registers.
      if (&Context.getTargetInfo().getLongDoubleFormat() ==
          &llvm::APFloat::x87DoubleExtended())
        return false;
    }
    return true;
  }

  // vectorcall can pass XMM, YMM, and ZMM vectors.
  if (VecSize == 128 || VecSize == 256 || VecSize == 512)
    return true;
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}
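// vectorcall caps homogeneous aggregates at four members, so e.g. a struct of
// four floats still qualifies for SSE registers while a struct of five does
// not.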
static ABIArgInfo getDirectX86Hva(llvm::Type *T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}
struct CCState {
  CCState(CGFunctionInfo &FI)
      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
        Required(FI.getRequiredArgs()), IsDelegateCall(FI.isDelegateCall()) {}

  llvm::SmallBitVector IsPreassigned;
  unsigned CC = CallingConv::CC_C;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;
  RequiredArgs Required;
  bool IsDelegateCall = false;
};
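// CCState carries the classifier's running state for one call: which fixed
// arguments were already assigned in a first pass (IsPreassigned), the calling
// convention, and how many integer (FreeRegs) and SSE (FreeSSERegs) registers
// remain as arguments are classified left to right.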
class X86_32ABIInfo : public ABIInfo {
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  bool IsLinuxABI;
  unsigned DefaultNumRegisterParameters;
  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State,
                                  unsigned ArgIndex) const;

  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
                   CGT.getTarget().getTriple().isOSCygMing()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}
  bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
                            bool AsReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three integer
    // registers and three fp registers.
    return occupiesMoreThan(ComponentTys, /*total=*/3);
  }
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {
    SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
  }
  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
                                     const CodeGenOptions &Opts);
  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }
226 std::string &Constraints,
227 std::vector<llvm::Type *> &ResultRegTypes,
228 std::vector<llvm::Type *> &ResultTruncRegTypes,
229 std::vector<LValue> &ResultRegDests,
230 std::string &AsmString,
231 unsigned NumOutputs)
const override;
234 return "movl\t%ebp, %ebp"
235 "\t\t// marker for objc_retainAutoreleaseReturnValue";
251 std::string &AsmString) {
253 llvm::raw_string_ostream OS(Buf);
255 while (Pos < AsmString.size()) {
256 size_t DollarStart = AsmString.find(
'$', Pos);
257 if (DollarStart == std::string::npos)
258 DollarStart = AsmString.size();
259 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
260 if (DollarEnd == std::string::npos)
261 DollarEnd = AsmString.size();
262 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
264 size_t NumDollars = DollarEnd - DollarStart;
265 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
267 size_t DigitStart = Pos;
268 if (AsmString[DigitStart] ==
'{') {
272 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
273 if (DigitEnd == std::string::npos)
274 DigitEnd = AsmString.size();
275 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
276 unsigned OperandIndex;
277 if (!OperandStr.getAsInteger(10, OperandIndex)) {
278 if (OperandIndex >= FirstIn)
279 OperandIndex += NumNewOuts;
287 AsmString = std::move(OS.str());
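// Illustrative example (not from the original source): with FirstIn = 1 and
// NumNewOuts = 1, an asm string "mov $1, $0" becomes "mov $2, $0" -- only
// references at or above FirstIn are shifted -- while an escaped "$$1" is
// left untouched because its dollar run has even length.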
// Add output constraints for EAX (and EDX for wide returns), the x86-32
// return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it
  // is larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // i386: the type must be register sized; MCU: it only needs to be <= 8
  // bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  // 64- and 128-bit vectors inside structures are not returned in registers.
  if (Size == 64 || Size == 128)
    return false;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // A structure is returned in register only if every field would be.
  if (!shouldReturnTypeInRegister(FD->getType(), Context))
    return false;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as their element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    if (FD->isBitField())
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // Conservatively match the old bitcode prototypes on non-Windows: only
      // C-like classes may be expanded.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
  }

  // Expansion is valid only if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, the hidden sret argument consumes one
  // integer register.
  if (State.CC != llvm::CallingConv::X86_FastCall &&
      State.CC != llvm::CallingConv::X86_VectorCall && State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}
469 CCState &State)
const {
475 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
476 State.CC == llvm::CallingConv::X86_RegCall) &&
477 isHomogeneousAggregate(RetTy,
Base, NumElts)) {
484 if (IsDarwinVectorABI) {
492 llvm::Type::getInt64Ty(getVMContext()), 2));
496 if ((Size == 8 || Size == 16 || Size == 32) ||
497 (Size == 64 && VT->getNumElements() == 1))
501 return getIndirectReturnResult(RetTy, State);
511 return getIndirectReturnResult(RetTy, State);
516 return getIndirectReturnResult(RetTy, State);
527 llvm::Type::getHalfTy(getVMContext()), 2));
532 if (shouldReturnTypeInRegister(RetTy, getContext())) {
541 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
542 || SeltTy->hasPointerRepresentation())
550 return getIndirectReturnResult(RetTy, State);
555 RetTy = EnumTy->getDecl()->getIntegerType();
558 if (EIT->getNumBits() > 64)
559 return getIndirectReturnResult(RetTy, State);
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment, just
  // use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
    return Align;

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  return MinABIStackAlignInBytes;
}
595 CCState &State)
const {
597 if (State.FreeRegs) {
600 return getNaturalAlignIndirectInReg(Ty);
602 return getNaturalAlignIndirect(Ty,
false);
606 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
607 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
613 bool Realign = TypeAlign > StackAlign;
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }

  return Integer;
}
bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI does not allow passing >8-byte structs in registers,
    // even if there are enough free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}
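// Example of the bookkeeping above (illustrative): an 8-byte struct needs
// SizeInRegs = (64 + 31) / 32 = 2 slots, so with State.FreeRegs == 3 it
// succeeds and leaves one register free, while a 12-byte struct under the MCU
// ABI fails the SizeInRegs > 2 test regardless of free registers.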
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}
bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty,
                                            CCState &State) const {
  bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
                    (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
                     Ty->isReferenceType());

  if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
                      State.CC == llvm::CallingConv::X86_VectorCall))
    return false;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
    return false;

  return true;
}
void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI,
                                           CCState &State) const {
  // First pass for vectorcall: hand free SSE registers to vector and
  // homogeneous-aggregate arguments and mark them as preassigned.
  MutableArrayRef<CGFunctionInfo::ArgInfo> Args = FI.arguments();
  for (int I = 0, E = Args.size(); I < E; ++I) {
    const Type *Base = nullptr;
    uint64_t NumElts = 0;
    const QualType &Ty = Args[I].type;
    if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
        isHomogeneousAggregate(Ty, Base, NumElts)) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        Args[I].info = ABIArgInfo::getDirectInReg();
        State.IsPreassigned.set(I);
      }
    }
  }
}
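// The vectorcall scheme is thus two-pass: vector arguments claim XMM/YMM/ZMM
// registers first, and the remaining arguments -- including HVAs that did not
// fit -- are classified in the normal left-to-right pass, which skips
// anything marked in State.IsPreassigned.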
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
                                               unsigned ArgIndex) const {
  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;

  TypeInfo TI = getContext().getTypeInfo(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (State.IsDelegateCall) {
      // Avoid having different alignments on delegate call arguments by
      // always setting the alignment to 4, which is what we do for inallocas.
      ABIArgInfo Res = getIndirectResult(Ty, /*ByVal=*/false, State);
      Res.setIndirectAlign(CharUnits::fromQuantity(4));
      return Res;
    }
  }

  // Regcall and vectorcall use homogeneous vector aggregates.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((IsRegCall || IsVectorCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;

      // Vectorcall passes HVAs directly and does not flatten them.
      if (IsVectorCall)
        return getDirectX86Hva();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, /*ByVal=*/true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (TI.Width + 31) / 32;
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return InReg ? ABIArgInfo::getDirectInReg(Result)
                   : ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Pass over-aligned aggregates to non-variadic functions on Windows
    // indirectly.
    if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
      unsigned AlignInBits = 0;
      if (RT) {
        const ASTRecordLayout &Layout =
            getContext().getASTRecordLayout(RT->getDecl());
        AlignInBits = getContext().toBits(Layout.getRequiredAlignment());
      } else if (TI.isAlignRequired()) {
        AlignInBits = TI.Align;
      }
      if (AlignInBits > 32)
        return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // Expand small (<= 128-bit) record types when the stack layout of those
    // arguments matches the struct; byval would inhibit optimizations.
    if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
        canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          IsFastCall || IsVectorCall || IsRegCall, PaddingType);

    return getIndirectResult(Ty, /*ByVal=*/true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Windows, vectors are passed directly if registers are available, or
    // indirectly if not.
    if (IsWin32StructABI) {
      if (TI.Width <= 512 && State.FreeSSERegs > 0) {
        --State.FreeSSERegs;
        return ABIArgInfo::getDirectInReg();
      }
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // On Darwin, some small vectors are passed as integers.
    if (IsDarwinVectorABI) {
      if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
          (TI.Width == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), TI.Width));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (isPromotableIntegerTypeForABI(Ty)) {
    return InReg ? ABIArgInfo::getExtendInReg(Ty) : ABIArgInfo::getExtend(Ty);
  }

  if (const auto *EIT = Ty->getAs<BitIntType>()) {
    if (EIT->getNumBits() <= 64) {
      return InReg ? ABIArgInfo::getDirectInReg() : ABIArgInfo::getDirect();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  return InReg ? ABIArgInfo::getDirectInReg() : ABIArgInfo::getDirect();
}
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI);
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 3;
  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeRegs = 5;
    State.FreeSSERegs = 8;
  } else if (IsWin32StructABI) {
    // Since MSVC 2015, the first three SSE vectors have been passed in
    // registers. The rest are passed indirectly.
    State.FreeRegs = DefaultNumRegisterParameters;
    State.FreeSSERegs = 3;
  } else
    State.FreeRegs = DefaultNumRegisterParameters;
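  // Register budgets visible above, for reference: fastcall gets 2 integer
  // registers (ECX, EDX) and 3 XMM registers, vectorcall gets 6 XMM
  // registers, regcall gets 8, and the plain C convention falls back to the
  // -mregparm value carried in DefaultNumRegisterParameters.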
  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }

  // For vectorcall, do a first pass over the arguments, assigning FP and
  // vector arguments to their free registers.
  if (State.CC == llvm::CallingConv::X86_VectorCall)
    runVectorCallFirstPass(FI, State);

  bool UsedInAlloca = false;
  MutableArrayRef<CGFunctionInfo::ArgInfo> Args = FI.arguments();
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    // Skip arguments that were preassigned by the vectorcall first pass.
    if (State.IsPreassigned.test(I))
      continue;

    Args[I].info = classifyArgumentType(Args[I].type, State, I);
    UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed inalloca for any argument, rewrite all the memory arguments
  // to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
void X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                        CharUnits &StackOffset,
                                        ABIArgInfo &Info, QualType Type) const {
  // Arguments are always 4-byte-aligned.
  CharUnits WordSize = CharUnits::fromQuantity(4);
  assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");

  // sret pointers and non-byval indirect arguments require an extra pointer
  // indirection.
  bool IsIndirect = false;
  if (Info.isIndirect() && !Info.getIndirectByVal())
    IsIndirect = true;
  Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
  llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
  if (IsIndirect)
    LLTy = llvm::PointerType::getUnqual(getVMContext());
  FrameFields.push_back(LLTy);
  StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);

  // Insert padding bytes to respect alignment.
  CharUnits FieldEnd = StackOffset;
  StackOffset = FieldEnd.alignTo(WordSize);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  }
}
  llvm_unreachable("invalid enum");
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  // The stack alignment is always 4.
  CharUnits StackAlign = CharUnits::fromQuantity(4);

  CharUnits StackOffset;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true),
                  StackAlign);
}
RValue X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  // x86-32 changes the alignment of certain arguments on the stack.
  TypeInfo.Align = CharUnits::fromQuantity(
      getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
                                 CodeGen::CodeGenModule &CGM) {
  if (!FD->hasAttr<AnyX86InterruptAttr>())
    return;

  llvm::Function *Fn = cast<llvm::Function>(GV);
  Fn->setCallingConv(llvm::CallingConv::X86_INTR);

  llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
      Fn->getContext(), ByValTy);
  Fn->addParamAttr(0, NewAttr);
}
void X86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }

    addX86InterruptAttrs(FD, GV, CGM);
  }
}
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  // 0-7 are the eight integer registers; 8 is %eip.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4); these have size 16, which is sizeof(long double)
    // on platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags.
    Builder.CreateAlignedStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
        CharUnits::One());

    // 11-16 are st(0..5); these have size 12 here.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  static Class merge(Class Accum, Class Field);

  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the given
  /// type should be passed.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg, bool IsRegCall = false) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                  unsigned &neededInt, unsigned &neededSSE,
                                  bool isNamedArg,
                                  bool IsRegCall = false) const;

  ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                       unsigned &NeededSSE,
                                       unsigned &MaxVectorWidth) const;

  ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                           unsigned &NeededSSE,
                                           unsigned &MaxVectorWidth) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities, but is not
  /// honored on Darwin for bitcode compatibility.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  /// GCC classifies <1 x long long> as SSE, but some platform ABIs keep the
  /// old INTEGER classification for compatibility.
  bool classifyIntegerMMXAsSSE() const {
    // Clang <= 3.8 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver3_8)
      return false;

    const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
      return false;
    return true;
  }

  // GCC classifies vectors of __int128 as memory.
  bool passInt128VectorsInMem() const {
    // Clang <= 9.0 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver9)
      return false;

    const llvm::Triple &T = getTarget().getTriple();
    return T.isOSLinux() || T.isOSNetBSD();
  }

  X86AVXABILevel AVXLevel;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg=*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
    }
    return false;
  }

  bool has64BitPointers() const {
    return Has64BitPointers;
  }
};
class WinX86_64ABIInfo : public ABIInfo {
public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

private:
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
                      bool IsVectorCall, bool IsRegCall) const;

  X86AVXABILevel AVXLevel;
  bool IsMingw64;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers; 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }
  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    bool HasAVXType = false;
    for (CallArgList::const_iterator it = args.begin(), ie = args.end();
         it != ie; ++it) {
      if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {
        HasAVXType = true;
        break;
      }
    }
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->addFnAttr("stackrealign");
      }

      addX86InterruptAttrs(FD, GV, CGM);
    }
  }
  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee, const CallArgList &Args,
                            QualType ReturnType) const override;
1485 llvm::StringMap<bool> &CallerMap,
1487 llvm::StringMap<bool> &CalleeMap,
1489 if (CalleeMap.empty() && CallerMap.empty()) {
1500 const llvm::StringMap<bool> &CallerMap,
1501 const llvm::StringMap<bool> &CalleeMap,
1504 bool CallerHasFeat = CallerMap.lookup(Feature);
1505 bool CalleeHasFeat = CalleeMap.lookup(Feature);
1506 if (!CallerHasFeat && !CalleeHasFeat)
1507 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
1508 << IsArgument << Ty << Feature;
1511 if (!CallerHasFeat || !CalleeHasFeat)
1512 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1513 << IsArgument << Ty << Feature;
1522 const llvm::StringMap<bool> &CallerMap,
1523 const llvm::StringMap<bool> &CalleeMap,
1525 bool Caller256 = CallerMap.lookup(
"avx512f") && !CallerMap.lookup(
"evex512");
1526 bool Callee256 = CalleeMap.lookup(
"avx512f") && !CalleeMap.lookup(
"evex512");
1530 if (Caller256 || Callee256)
1531 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1532 << IsArgument << Ty <<
"evex512";
1535 "avx512f", IsArgument);
1540 const llvm::StringMap<bool> &CallerMap,
1541 const llvm::StringMap<bool> &CalleeMap,
QualType Ty,
void X86_64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
                                                   SourceLocation CallLoc,
                                                   const FunctionDecl *Caller,
                                                   const FunctionDecl *Callee,
                                                   const CallArgList &Args,
                                                   QualType ReturnType) const {
  if (!Callee)
    return;

  llvm::StringMap<bool> CallerMap;
  llvm::StringMap<bool> CalleeMap;
  unsigned ArgIndex = 0;

  // Loop through the actual call arguments rather than the function's
  // parameters, in case this is variadic.
  for (const CallArg &Arg : Args) {
    // The "avx" feature changes how vectors >128 bits are passed, and
    // "avx512f" additionally changes how vectors >256 bits are passed; warn
    // (or error on an obvious mismatch) like GCC does.
    if (Arg.getType()->isVectorType() &&
        CGM.getContext().getTypeSize(Arg.getType()) > 128) {
      initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
      QualType Ty = Arg.getType();
      // For clearer diagnostics, prefer the type from the FunctionDecl.
      if (ArgIndex < Callee->getNumParams())
        Ty = Callee->getParamDecl(ArgIndex)->getType();

      if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
                        CalleeMap, Ty, /*IsArgument=*/true))
        return;
    }
    ++ArgIndex;
  }

  // Check the return type, if any.
  if (Callee->getReturnType()->isVectorType() &&
      CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
    initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
    checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
                  CalleeMap, Callee->getReturnType(), /*IsArgument=*/false);
  }
}
static std::string qualifyWindowsLibrary(StringRef Lib) {
  // If the argument contains a space, enclose it in quotes.
  bool Quote = Lib.contains(' ');
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.ends_with_insensitive(".lib") && !Lib.ends_with_insensitive(".a"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                             bool RetSmallStructInRegABI, bool Win32StructABI,
                             unsigned NumRegisterParameters)
      : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
                                Win32StructABI, NumRegisterParameters,
                                /*SoftFloatABI=*/false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WinX86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers; 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WinX86_64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }

    addX86InterruptAttrs(FD, GV, CGM);
  }

  addStackProbeTargetAttributes(D, GV, CGM);
}
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5, post-merger cleanup: if one eightbyte is
  // Memory the whole argument goes to memory; X87Up must be preceded by X87;
  // aggregates over two eightbytes must be SSE/SSEUp throughout; and a stray
  // SSEUp is demoted to SSE. Clauses (b) and (c) were added in revision 0.98.
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2, Rule 4: merge the classes of two fields that share an
  // eightbyte.
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
                             Class &Hi, bool isNamedArg,
                             bool IsRegCall) const {
  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
               k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
      Current = SSE;
    } else if (k == BuiltinType::Float128) {
      Lo = SSE;
      Hi = SSEUp;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad()) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
    return;
  }
  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->isMemberFunctionPointerType()) {
    if (Has64BitPointers) {
      // With 64-bit pointers this is an {i64, i64}; classify both halves.
      Lo = Hi = Integer;
    } else {
      // With 32-bit pointers this is an {i32, i32}; classify Hi as well only
      // if the pair straddles an eightbyte boundary.
      uint64_t EB_FuncPtr = (OffsetBase) / 64;
      uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
      if (EB_FuncPtr != EB_ThisAdj) {
        Lo = Hi = Integer;
      } else {
        Current = Integer;
      }
    }
    return;
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
      // gcc passes small vectors (<4 x char>, <2 x short>, <1 x int>, etc.)
      // as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be split.
      uint64_t EB_Lo = (OffsetBase) / 64;
      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
      if (EB_Lo != EB_Hi)
        Hi = Lo;
    } else if (Size == 64) {
      QualType ElementType = VT->getElementType();

      // gcc passes <1 x long long> as SSE, but older clang passed it as
      // INTEGER; keep INTEGER on the affected platforms for compatibility.
      if (!classifyIntegerMMXAsSSE() &&
          (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
           ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      QualType ElementType = VT->getElementType();

      // gcc passes 256- and 512-bit <X x __int128> vectors in memory.
      if (passInt128VectorsInMem() && Size != 128 &&
          (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
           ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
        return;
1941 else if (Size <= 128)
1943 }
else if (ET->
isFloat16Type() || ET == getContext().FloatTy ||
1946 }
else if (ET == getContext().DoubleTy) {
1948 }
else if (ET == getContext().LongDoubleTy) {
1949 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1950 if (LDF == &llvm::APFloat::IEEEquad())
1952 else if (LDF == &llvm::APFloat::x87DoubleExtended())
1953 Current = ComplexX87;
1954 else if (LDF == &llvm::APFloat::IEEEdouble())
1957 llvm_unreachable(
"unexpected long double representation!");
1962 uint64_t EB_Real = (OffsetBase) / 64;
1963 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1964 if (Hi == NoClass && EB_Real != EB_Imag)
1971 if (EITy->getNumBits() <= 64)
1973 else if (EITy->getNumBits() <= 128)
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2, Rule 1: objects larger than eight eightbytes have
    // class MEMORY; regcall imposes no such limit.
    if (!IsRegCall && Size > 512)
      return;

    // Unaligned fields also force class MEMORY; only the array base needs
    // checking.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement a simplified merge.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getZExtSize();

    // The only way a 256-bit or wider vector survives past 128 bits is as a
    // single over-wide vector element.
    if (Size > 128 &&
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
      return;

    for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
         ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base =
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

        // Classify this base at its offset within the record.
        Class FieldLo, FieldHi;
        uint64_t Offset =
            OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
                                LangOptions::ClangABI::Ver11 ||
                            getContext().getTargetInfo().getTriple().isPS();
    bool IsUnion = RT->isUnionType() && !UseClang11Compat;

    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // Ignore padding bit-fields.
      if (BitField && i->isUnnamedBitField())
        continue;

      // AMD64-ABI 3.2.3p2, Rule 1: objects larger than eight eightbytes (or
      // containing unaligned fields) have class MEMORY; the only >16-byte
      // exception is a single over-aligned vector field.
      if (Size > 128 &&
          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      bool IsInMemory =
          Offset % getContext().getTypeAlign(i->getType().getCanonicalType());
      // Note, skip this test for bit-fields.
      if (!BitField && IsInMemory) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // Classify this field; bit-fields classify as Integer in their
      // eightbyte(s).
      Class FieldLo, FieldHi;
      if (BitField) {
        assert(!i->isUnnamedBitField());
        uint64_t Size = i->getBitWidthValue(getContext());
        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
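// Worked example of the record rules above (illustrative, not from the
// original source): struct { int a; int b; double d; } is 16 bytes; fields a
// and b merge to INTEGER in the first eightbyte and d classifies the second
// eightbyte as SSE, so the struct is passed in one GPR plus one XMM register.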
ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (Ty->isBitIntType())
      return getNaturalAlignIndirect(Ty);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  return getNaturalAlignIndirect(Ty);
}
bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&
        (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
         EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
      return true;
  }

  return false;
}
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
      !Ty->isBitIntType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  // Compute the byval alignment, so the mid-level optimizer knows it.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible;
  // this is important for good codegen.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
}
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType)) {
    // Don't pass vXi128 vectors in their native type; the backend can't
    // legalize them. Use a vXi64 vector instead.
    if (passInt128VectorsInMem() &&
        cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
      uint64_t Size = getContext().getTypeSize(Ty);
      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
                                        Size / 64);
    }

    return IRType;
  }

  if (IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  // Return an LLVM IR vector type based on the size of 'Ty'.
  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                                    Size / 64);
}
2302 if (TySize <= StartBit)
2307 unsigned NumElts = (
unsigned)AT->getZExtSize();
2310 for (
unsigned i = 0; i != NumElts; ++i) {
2312 unsigned EltOffset = i*EltSize;
2313 if (EltOffset >= EndBit)
break;
2315 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2317 EndBit-EltOffset, Context))
2329 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2330 for (
const auto &I : CXXRD->bases()) {
2331 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2332 "Unexpected base class!");
2338 if (BaseOffset >= EndBit)
continue;
2340 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2342 EndBit-BaseOffset, Context))
2353 i != e; ++i, ++idx) {
2357 if (FieldOffset >= EndBit)
break;
2359 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2375 const llvm::DataLayout &TD) {
2376 if (IROffset == 0 && IRType->isFloatingPointTy())
2380 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2381 if (!STy->getNumContainedTypes())
2384 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2385 unsigned Elt = SL->getElementContainingOffset(IROffset);
2386 IROffset -= SL->getElementOffset(Elt);
2391 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2392 llvm::Type *EltTy = ATy->getElementType();
2393 unsigned EltSize = TD.getTypeAllocSize(EltTy);
2394 IROffset -= IROffset / EltSize * EltSize;
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  const llvm::DataLayout &TD = getDataLayout();
  unsigned SourceSize =
      (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
  llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
  if (!T0 || T0->isDoubleTy())
    return llvm::Type::getDoubleTy(getVMContext());

  // Get the adjacent FP type.
  llvm::Type *T1 = nullptr;
  unsigned T0Size = TD.getTypeAllocSize(T0);
  if (SourceSize > T0Size)
    T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
  if (T1 == nullptr) {
    // Check if IRType is a half/bfloat + float; the float will be at
    // IROffset+4 due to its alignment.
    if (T0->is16bitFPTy() && SourceSize > 4)
      T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    // If we can't get a second FP type, return a simple half or float.
    if (T1 == nullptr)
      return T0;
  }

  if (T0->isFloatTy() && T1->isFloatTy())
    return llvm::FixedVectorType::get(T0, 2);

  if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
    llvm::Type *T2 = nullptr;
    if (SourceSize > 4)
      T2 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    if (T2 == nullptr)
      return llvm::FixedVectorType::get(T0, 2);
    return llvm::FixedVectorType::get(T0, 4);
  }

  if (T0->is16bitFPTy() || T1->is16bitFPTy())
    return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);

  return llvm::Type::getDoubleTy(getVMContext());
}
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset * 8 + BitWidth,
                                SourceOffset * 8 + 64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset / EltSize * EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset - EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in
  // an integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes - SourceOffset, 8U) * 8);
}
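// Illustrative behavior of the lookup above: for a plain i32 at offset 0 whose
// upper four bytes are provably padding, the i32 itself is returned, but for
// struct { int a; int b; } bits 32..64 carry user data, so the code falls
// through and returns an i64 covering the whole eightbyte.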
2533 const llvm::DataLayout &TD) {
2538 unsigned LoSize = (
unsigned)TD.getTypeAllocSize(Lo);
2539 llvm::Align HiAlign = TD.getABITypeAlign(Hi);
2540 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
2541 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
2553 if (Lo->isHalfTy() || Lo->isFloatTy())
2554 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2556 assert((Lo->isIntegerTy() || Lo->isPointerTy())
2557 &&
"Invalid/unknown lo type");
2558 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2562 llvm::StructType *
Result = llvm::StructType::get(Lo, Hi);
2565 assert(TD.getStructLayout(
Result)->getElementOffset(1) == 8 &&
2566 "Invalid x86-64 argument pair!");
ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg=*/true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register; leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // Rule 2: types of class MEMORY are returned via a hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

  // Rule 3: INTEGER uses the next register of the sequence %rax, %rdx.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(RetTy))
        return ABIArgInfo::getExtend(RetTy);
    }
    break;

  // Rule 4: SSE uses the next register of the sequence %xmm0, %xmm1.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

  // Rule 6: X87 is returned on the x87 stack in %st0 as an 80-bit number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

  // Rule 8: COMPLEX_X87 returns the real part in %st0 and the imaginary
  // part in %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously and X87 should never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // Rule 5: SSEUP should always be preceded by SSE; just widen the vector.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

  // Rule 7: if X87Up is not preceded by X87 (possible with unions), follow
  // gcc and pass the extra bits in an SSE register.
  case X87Up:
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part; it is
  // known to be passed in the high eightbyte of the result.
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
ABIArgInfo
X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                    unsigned &neededInt, unsigned &neededSSE,
                                    bool isNamedArg, bool IsRegCall) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register; leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  // Rule 1: MEMORY is passed on the stack. Rule 5: X87, X87UP, and
  // COMPLEX_X87 are passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // Rule 2: INTEGER uses the next register of the sequence %rdi, %rsi,
  // %rdx, %rcx, %r8 and %r9.
  case Integer:
    ++neededInt;
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(Ty))
        return ABIArgInfo::getExtend(Ty);
    }
    break;

  // Rule 3: SSE uses the next available SSE register, taken in order from
  // %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously; ComplexX87 and X87 never occur as hi
  // classes, and X87Up must be preceded by X87, which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass:
    break;

  case Integer:
    ++neededInt;
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // X87Up generally doesn't occur here except with unions.
  case X87Up:
  case SSE:
    ++neededSSE;
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // Rule 4: SSEUP is passed in the upper half of the last used SSE register;
  // this only happens when 128-bit-plus vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part; we do
  // this by forming a first-class struct aggregate: {low, high}.
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE,
                                             unsigned &MaxVectorWidth) const {
  auto RT = Ty->getAs<RecordType>();
  assert(RT && "classifyRegCallStructType only valid with struct types");

  if (RT->getDecl()->hasFlexibleArrayMember())
    return getIndirectReturnResult(Ty);

  // Sum up bases.
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
                                        MaxVectorWidth)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
  }

  // Sum up members.
  for (const auto *FD : RT->getDecl()->fields()) {
    QualType MTy = FD->getType();
    if (MTy->isRecordType() && !MTy->isUnionType()) {
      if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
                                        MaxVectorWidth)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
    } else {
      unsigned LocalNeededInt, LocalNeededSSE;
      if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
                               /*isNamedArg=*/true, /*IsRegCall=*/true)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
      if (const auto *AT = getContext().getAsConstantArrayType(MTy))
        MTy = AT->getElementType();
      if (const auto *VT = MTy->getAs<VectorType>())
        if (getContext().getTypeSize(VT) > MaxVectorWidth)
          MaxVectorWidth = getContext().getTypeSize(VT);
      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;
    }
  }

  return ABIArgInfo::getDirect();
}
ABIArgInfo
X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                         unsigned &NeededSSE,
                                         unsigned &MaxVectorWidth) const {
  NeededInt = 0;
  NeededSSE = 0;
  MaxVectorWidth = 0;

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
                                       MaxVectorWidth);
}
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CallingConv = FI.getCallingConvention();
  // Win64 calling convention can be forced on any x86_64 target with
  // __attribute__((ms_abi)); delegate to WinX86_64ABIInfo in that case.
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);
    return;
  }

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  // Keep track of the number of assigned registers.
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
        !FI.getReturnType()->getTypePtr()->isUnionType()) {
      FI.getReturnInfo() = classifyRegCallStructType(
          FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
      if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
        FreeIntRegs -= NeededInt;
        FreeSSERegs -= NeededSSE;
      } else {
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
      }
    } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
               getContext().getCanonicalType(FI.getReturnType()
                                                 ->getAs<ComplexType>()
                                                 ->getElementType()) ==
                   getContext().LongDoubleTy)
      // Complex long double is returned in memory under regcall.
      FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
    else
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  }

  // If the return value is indirect, the hidden argument consumes one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --FreeIntRegs;
  else if (NeededSSE && MaxVectorWidth > 0)
    FI.setMaxVectorWidth(MaxVectorWidth);

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
                                           MaxVectorWidth);
    else
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
                                      NeededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the stack.
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
      if (MaxVectorWidth > FI.getMaxVectorWidth())
        FI.setMaxVectorWidth(MaxVectorWidth);
    } else {
      it->info = getIndirectResult(it->type, FreeIntRegs);
    }
  }
}
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  Address overflow_arg_area_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards if the
  // type needs more than 8-byte alignment.
  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
  if (Align > CharUnits::fromQuantity(8))
    overflow_arg_area =
        emitRoundPointerUpToAlignment(CGF, overflow_arg_area, Align);

  // Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res = overflow_arg_area;

  // Steps 9-10. Advance l->overflow_arg_area by sizeof(type), aligned up to
  // an 8-byte boundary.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
                                            Offset, "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // Step 11. Return the fetched type.
  return Address(Res, LTy, Align);
}
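// Note the two roundings above: the overflow area pointer is first aligned up
// for over-aligned types, and after the load the cursor advances by
// sizeof(type) rounded up to a multiple of 8 -- e.g. a 12-byte struct moves
// overflow_arg_area forward by 16 bytes.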
RValue X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  // Assume the va_list type is correct; it should be a pointer to the LLVM
  // type: struct { i32 gp_offset; i32 fp_offset;
  //                i8* overflow_arg_area; i8* reg_save_area; };
  unsigned neededInt, neededSSE;

  Ty = getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg=*/false);

  // Emit a simple path if the value isn't passed in registers.
  if (!neededInt && !neededSSE)
    return CGF.EmitLoadOfAnyValue(
        CGF.MakeAddrLValue(EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty), Ty),
        Slot);

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether the argument fits into the
  // remaining registers.
  llvm::Value *InRegs = nullptr;
  Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // Step 4. Fetch the value from l->reg_save_area at the offsets held in
  // l->gp_offset and/or l->fp_offset.
  llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
      CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
  if (neededInt && neededSSE) {
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Value *GPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
    llvm::Value *FPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
    // ...
  } else if (neededInt || neededSSE == 1) {
    // Copy to a temporary if necessary to ensure the appropriate alignment.
    auto TInfo = getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = TInfo.Width.getQuantity();
    llvm::Type *CoTy = nullptr;
    if (AI.isDirect())
      CoTy = AI.getCoerceToType();

    llvm::Value *GpOrFpOffset = neededInt ? gp_offset : fp_offset;
    uint64_t Alignment = neededInt ? 8 : 16;
    uint64_t RegSize = neededInt ? neededInt * 8 : 16;
    llvm::Value *PtrOffset = /* ... */;
    // ...
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // ...
  }

  // Step 5. Set l->gp_offset += num_gp * 8 and l->fp_offset += num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
    QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
  const Type *Base = nullptr;
  uint64_t NumElts = 0;

  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();
  }
  return current;
}
ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    if (RT->getDecl()->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  }

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  // vectorcall and regcall add the concept of a homogeneous vector aggregate,
  // similar to other targets.
  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (IsRegCall) {
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
        if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
          return ABIArgInfo::getDirect();
        return ABIArgInfo::getExpand();
      }
      return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
          (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
        FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (IsReturnType) {
        return ABIArgInfo::getExpand();
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
        // HVAs are delayed and reclassified in the second pass.
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
    }
  }

  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or
    // is not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    case BuiltinType::Bool:
      // Bool is always extended; other builtin types are not.
      return ABIArgInfo::getExtend(Ty);

    case BuiltinType::LongDouble:
      // MinGW-w64 GCC uses the 80-bit extended precision x87 unit and passes
      // long double indirectly through memory.
      if (IsMingw64) {
        const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
        if (LDF == &llvm::APFloat::x87DoubleExtended())
          return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
      break;

    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      // As a parameter, __int128 follows the >8-byte rule and is passed
      // indirectly, like GCC does.
      if (!IsReturnType)
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);

      // MinGW GCC returns i128 in XMM0; coerce to v2i64 to match.
      return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2));
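    // Note the asymmetry handled above: as an argument, __int128 follows the
    // MS rule for >8-byte values and goes indirect, but as a return value it
    // is coerced to <2 x i64> so it comes back in XMM0.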
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CC = FI.getCallingConvention();
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  // If __attribute__((sysv_abi)) is in use, use the SysV argument
  // classification rules.
  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);
    return;
  }

  unsigned FreeSSERegs = 0;
  if (IsVectorCall) {
    // We can use up to 4 SSE return registers with vectorcall.
    FreeSSERegs = 4;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers.
    FreeSSERegs = 16;
  }

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs,
                                  /*IsReturnType=*/true,
                                  IsVectorCall, IsRegCall);

  if (IsVectorCall) {
    // We can use up to 6 SSE register parameters with vectorcall.
    FreeSSERegs = 6;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers; the return registers can be reused.
    FreeSSERegs = 16;
  }

  unsigned ArgNum = 0;
  unsigned ZeroSSERegs = 0;
  for (auto &I : FI.arguments()) {
    // Vectorcall on x64 only permits the first six arguments to be passed in
    // XMM/YMM registers; pretend no vector registers are left after that.
    unsigned *MaybeFreeSSERegs =
        (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
    I.info = classify(I.type, *MaybeFreeSSERegs, /*IsReturnType=*/false,
                      IsVectorCall, IsRegCall);
    ++ArgNum;
  }

  if (IsVectorCall) {
    // For vectorcall, assign aggregate HVAs to any free vector registers in a
    // second pass.
    for (auto &I : FI.arguments())
      I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
  }
}
RValue WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3458 CodeGenModule &CGM,
bool DarwinVectorABI,
bool Win32StructABI,
3459 unsigned NumRegisterParameters,
bool SoftFloatABI) {
3460 bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3462 return std::make_unique<X86_32TargetCodeGenInfo>(
3463 CGM.
getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3464 NumRegisterParameters, SoftFloatABI);
3468 CodeGenModule &CGM,
bool DarwinVectorABI,
bool Win32StructABI,
3469 unsigned NumRegisterParameters) {
3470 bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3472 return std::make_unique<WinX86_32TargetCodeGenInfo>(
3473 CGM.
getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3474 NumRegisterParameters);
3477std::unique_ptr<TargetCodeGenInfo>
3480 return std::make_unique<X86_64TargetCodeGenInfo>(CGM.
getTypes(), AVXLevel);
3483std::unique_ptr<TargetCodeGenInfo>
3486 return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.
getTypes(), AVXLevel);