10#include "TargetInfo.h"
12#include "llvm/ADT/SmallBitVector.h"
20bool IsX86_MMXType(llvm::Type *IRType) {
22 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
23 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
24 IRType->getScalarSizeInBits() != 64;
30 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
31 .Cases(
"y",
"&y",
"^Ym",
true)
33 if (IsMMXCons && Ty->isVectorTy()) {
34 if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=
51 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
52 if (BT->getKind() == BuiltinType::LongDouble) {
54 &llvm::APFloat::x87DoubleExtended())
63 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
71static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
72 return NumMembers <= 4;
76static ABIArgInfo getDirectX86Hva(llvm::Type* T =
nullptr) {
79 AI.setCanBeFlattened(
false);
90 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
93 llvm::SmallBitVector IsPreassigned;
94 unsigned CC = CallingConv::CC_C;
95 unsigned FreeRegs = 0;
96 unsigned FreeSSERegs = 0;
102class X86_32ABIInfo :
public ABIInfo {
108 static const unsigned MinABIStackAlignInBytes = 4;
110 bool IsDarwinVectorABI;
111 bool IsRetSmallStructInRegABI;
112 bool IsWin32StructABI;
116 unsigned DefaultNumRegisterParameters;
118 static bool isRegisterSize(
unsigned Size) {
119 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
124 return isX86VectorTypeForVectorCall(
getContext(), Ty);
128 uint64_t NumMembers)
const override {
130 return isX86VectorCallAggregateSmallEnough(NumMembers);
142 unsigned getTypeStackAlignInBytes(
QualType Ty,
unsigned Align)
const;
147 unsigned ArgIndex)
const;
151 bool updateFreeRegs(
QualType Ty, CCState &State)
const;
153 bool shouldAggregateUseDirect(
QualType Ty, CCState &State,
bool &InReg,
154 bool &NeedsPadding)
const;
155 bool shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const;
157 bool canExpandIndirectArgument(
QualType Ty)
const;
166 void runVectorCallFirstPass(
CGFunctionInfo &FI, CCState &State)
const;
175 bool RetSmallStructInRegABI,
bool Win32StructABI,
176 unsigned NumRegisterParameters,
bool SoftFloatABI)
177 :
ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
178 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
179 IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
180 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
181 IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
182 CGT.getTarget().getTriple().isOSCygMing()),
183 DefaultNumRegisterParameters(NumRegisterParameters) {}
192 bool AsReturnValue)
const override {
204 bool RetSmallStructInRegABI,
bool Win32StructABI,
205 unsigned NumRegisterParameters,
bool SoftFloatABI)
207 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
208 NumRegisterParameters, SoftFloatABI)) {
209 SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
212 static bool isStructReturnInRegABI(
225 llvm::Value *
Address)
const override;
228 StringRef Constraint,
229 llvm::Type* Ty)
const override {
230 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
234 std::string &Constraints,
235 std::vector<llvm::Type *> &ResultRegTypes,
236 std::vector<llvm::Type *> &ResultTruncRegTypes,
237 std::vector<LValue> &ResultRegDests,
238 std::string &AsmString,
239 unsigned NumOutputs)
const override;
242 return "movl\t%ebp, %ebp"
243 "\t\t// marker for objc_retainAutoreleaseReturnValue";
259 std::string &AsmString) {
261 llvm::raw_string_ostream OS(Buf);
263 while (Pos < AsmString.size()) {
264 size_t DollarStart = AsmString.find(
'$', Pos);
265 if (DollarStart == std::string::npos)
266 DollarStart = AsmString.size();
267 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
268 if (DollarEnd == std::string::npos)
269 DollarEnd = AsmString.size();
270 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
272 size_t NumDollars = DollarEnd - DollarStart;
273 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
275 size_t DigitStart = Pos;
276 if (AsmString[DigitStart] ==
'{') {
280 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
281 if (DigitEnd == std::string::npos)
282 DigitEnd = AsmString.size();
283 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
284 unsigned OperandIndex;
285 if (!OperandStr.getAsInteger(10, OperandIndex)) {
286 if (OperandIndex >= FirstIn)
287 OperandIndex += NumNewOuts;
295 AsmString = std::move(OS.str());
299void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
301 std::vector<llvm::Type *> &ResultRegTypes,
302 std::vector<llvm::Type *> &ResultTruncRegTypes,
303 std::vector<LValue> &ResultRegDests, std::string &AsmString,
304 unsigned NumOutputs)
const {
309 if (!Constraints.empty())
311 if (RetWidth <= 32) {
312 Constraints +=
"={eax}";
313 ResultRegTypes.push_back(CGF.
Int32Ty);
317 ResultRegTypes.push_back(CGF.
Int64Ty);
321 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.
getLLVMContext(), RetWidth);
322 ResultTruncRegTypes.push_back(CoerceTy);
326 ResultRegDests.push_back(ReturnSlot);
333bool X86_32ABIInfo::shouldReturnTypeInRegister(
QualType Ty,
339 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
345 if (Size == 64 || Size == 128)
360 return shouldReturnTypeInRegister(AT->getElementType(), Context);
364 if (!RT)
return false;
376 if (!shouldReturnTypeInRegister(FD->getType(), Context))
385 Ty = CTy->getElementType();
395 return Size == 32 || Size == 64;
400 for (
const auto *FD : RD->
fields()) {
410 if (FD->isBitField())
435bool X86_32ABIInfo::canExpandIndirectArgument(
QualType Ty)
const {
442 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
443 if (!IsWin32StructABI) {
446 if (!CXXRD->isCLike())
450 if (CXXRD->isDynamicClass())
461 return Size == getContext().getTypeSize(Ty);
464ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(
QualType RetTy, CCState &State)
const {
467 if (State.FreeRegs) {
470 return getNaturalAlignIndirectInReg(RetTy);
472 return getNaturalAlignIndirect(RetTy,
false);
476 CCState &State)
const {
482 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
483 State.CC == llvm::CallingConv::X86_RegCall) &&
484 isHomogeneousAggregate(RetTy,
Base, NumElts)) {
491 if (IsDarwinVectorABI) {
499 llvm::Type::getInt64Ty(getVMContext()), 2));
503 if ((Size == 8 || Size == 16 || Size == 32) ||
504 (Size == 64 && VT->getNumElements() == 1))
508 return getIndirectReturnResult(RetTy, State);
518 return getIndirectReturnResult(RetTy, State);
523 return getIndirectReturnResult(RetTy, State);
534 llvm::Type::getHalfTy(getVMContext()), 2));
539 if (shouldReturnTypeInRegister(RetTy, getContext())) {
548 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
549 || SeltTy->hasPointerRepresentation())
557 return getIndirectReturnResult(RetTy, State);
562 RetTy = EnumTy->getDecl()->getIntegerType();
565 if (EIT->getNumBits() > 64)
566 return getIndirectReturnResult(RetTy, State);
572unsigned X86_32ABIInfo::getTypeStackAlignInBytes(
QualType Ty,
573 unsigned Align)
const {
576 if (Align <= MinABIStackAlignInBytes)
584 if (Ty->
isVectorType() && (Align == 16 || Align == 32 || Align == 64))
588 if (!IsDarwinVectorABI) {
590 return MinABIStackAlignInBytes;
598 return MinABIStackAlignInBytes;
602 CCState &State)
const {
604 if (State.FreeRegs) {
607 return getNaturalAlignIndirectInReg(Ty);
609 return getNaturalAlignIndirect(Ty,
false);
613 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
614 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
620 bool Realign = TypeAlign > StackAlign;
625X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualType Ty)
const {
632 if (K == BuiltinType::Float || K == BuiltinType::Double)
638bool X86_32ABIInfo::updateFreeRegs(
QualType Ty, CCState &State)
const {
639 if (!IsSoftFloatABI) {
645 unsigned Size = getContext().getTypeSize(Ty);
646 unsigned SizeInRegs = (
Size + 31) / 32;
652 if (SizeInRegs > State.FreeRegs) {
661 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
665 State.FreeRegs -= SizeInRegs;
669bool X86_32ABIInfo::shouldAggregateUseDirect(
QualType Ty, CCState &State,
671 bool &NeedsPadding)
const {
678 NeedsPadding =
false;
681 if (!updateFreeRegs(Ty, State))
687 if (State.CC == llvm::CallingConv::X86_FastCall ||
688 State.CC == llvm::CallingConv::X86_VectorCall ||
689 State.CC == llvm::CallingConv::X86_RegCall) {
690 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
699bool X86_32ABIInfo::shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const {
700 bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
704 if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
705 State.CC == llvm::CallingConv::X86_VectorCall))
708 if (!updateFreeRegs(Ty, State))
711 if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
718void X86_32ABIInfo::runVectorCallFirstPass(
CGFunctionInfo &FI, CCState &State)
const {
729 for (
int I = 0, E = Args.size(); I < E; ++I) {
734 isHomogeneousAggregate(Ty,
Base, NumElts)) {
735 if (State.FreeSSERegs >= NumElts) {
736 State.FreeSSERegs -= NumElts;
738 State.IsPreassigned.set(I);
745 unsigned ArgIndex)
const {
747 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
748 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
749 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
752 TypeInfo TI = getContext().getTypeInfo(Ty);
759 return getIndirectResult(Ty,
false, State);
760 }
else if (State.IsDelegateCall) {
763 ABIArgInfo Res = getIndirectResult(Ty,
false, State);
776 if ((IsRegCall || IsVectorCall) &&
777 isHomogeneousAggregate(Ty,
Base, NumElts)) {
778 if (State.FreeSSERegs >= NumElts) {
779 State.FreeSSERegs -= NumElts;
784 return getDirectX86Hva();
790 return getIndirectResult(Ty,
false, State);
797 return getIndirectResult(Ty,
true, State);
800 if (!IsWin32StructABI &&
isEmptyRecord(getContext(), Ty,
true))
803 llvm::LLVMContext &LLVMContext = getVMContext();
804 llvm::IntegerType *
Int32 = llvm::Type::getInt32Ty(LLVMContext);
805 bool NeedsPadding =
false;
807 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
808 unsigned SizeInRegs = (TI.
Width + 31) / 32;
810 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
816 llvm::IntegerType *PaddingType = NeedsPadding ?
Int32 :
nullptr;
823 if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
824 unsigned AlignInBits = 0;
827 getContext().getASTRecordLayout(RT->
getDecl());
830 AlignInBits = TI.
Align;
832 if (AlignInBits > 32)
833 return getIndirectResult(Ty,
false, State);
842 if (TI.
Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
843 canExpandIndirectArgument(Ty))
845 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
847 return getIndirectResult(Ty,
true, State);
854 if (IsWin32StructABI) {
855 if (TI.
Width <= 512 && State.FreeSSERegs > 0) {
859 return getIndirectResult(Ty,
false, State);
864 if (IsDarwinVectorABI) {
866 (TI.
Width == 64 && VT->getNumElements() == 1))
868 llvm::IntegerType::get(getVMContext(), TI.
Width));
871 if (IsX86_MMXType(CGT.ConvertType(Ty)))
879 Ty = EnumTy->getDecl()->getIntegerType();
881 bool InReg = shouldPrimitiveUseInReg(Ty, State);
883 if (isPromotableIntegerTypeForABI(Ty)) {
890 if (EIT->getNumBits() <= 64) {
895 return getIndirectResult(Ty,
false, State);
907 else if (State.CC == llvm::CallingConv::X86_FastCall) {
909 State.FreeSSERegs = 3;
910 }
else if (State.CC == llvm::CallingConv::X86_VectorCall) {
912 State.FreeSSERegs = 6;
915 else if (State.CC == llvm::CallingConv::X86_RegCall) {
917 State.FreeSSERegs = 8;
918 }
else if (IsWin32StructABI) {
921 State.FreeRegs = DefaultNumRegisterParameters;
922 State.FreeSSERegs = 3;
924 State.FreeRegs = DefaultNumRegisterParameters;
931 if (State.FreeRegs) {
944 if (State.CC == llvm::CallingConv::X86_VectorCall)
945 runVectorCallFirstPass(FI, State);
947 bool UsedInAlloca =
false;
949 for (
unsigned I = 0, E = Args.size(); I < E; ++I) {
951 if (State.IsPreassigned.test(I))
962 rewriteWithInAlloca(FI);
971 assert(StackOffset.
isMultipleOf(WordSize) &&
"unaligned inalloca struct");
976 bool IsIndirect =
false;
980 llvm::Type *LLTy = CGT.ConvertTypeForMem(
Type);
982 LLTy = llvm::PointerType::getUnqual(getVMContext());
983 FrameFields.push_back(LLTy);
984 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(
Type);
988 StackOffset = FieldEnd.
alignTo(WordSize);
989 if (StackOffset != FieldEnd) {
990 CharUnits NumBytes = StackOffset - FieldEnd;
991 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
992 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
993 FrameFields.push_back(Ty);
1015 llvm_unreachable(
"invalid enum");
1018void X86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo &FI)
const {
1019 assert(IsWin32StructABI &&
"inalloca only supported on win32");
1034 if (
Ret.isIndirect() &&
Ret.isSRetAfterThis() && !IsThisCall &&
1036 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1041 if (
Ret.isIndirect() && !
Ret.getInReg()) {
1042 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.
getReturnType());
1044 Ret.setInAllocaSRet(IsWin32StructABI);
1052 for (; I != E; ++I) {
1054 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1057 FI.
setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1065 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1072 getTypeStackAlignInBytes(Ty,
TypeInfo.
Align.getQuantity()));
1079bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1081 assert(Triple.getArch() == llvm::Triple::x86);
1083 switch (Opts.getStructReturnConvention()) {
1092 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1095 switch (Triple.getOS()) {
1096 case llvm::Triple::DragonFly:
1097 case llvm::Triple::FreeBSD:
1098 case llvm::Triple::OpenBSD:
1099 case llvm::Triple::Win32:
1108 if (!FD->
hasAttr<AnyX86InterruptAttr>())
1111 llvm::Function *Fn = cast<llvm::Function>(GV);
1112 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1118 llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
1119 Fn->getContext(), ByValTy);
1120 Fn->addParamAttr(0, NewAttr);
1123void X86_32TargetCodeGenInfo::setTargetAttributes(
1125 if (GV->isDeclaration())
1127 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1128 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1129 llvm::Function *Fn = cast<llvm::Function>(GV);
1130 Fn->addFnAttr(
"stackrealign");
1137bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1142 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.
Int8Ty, 4);
1153 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.
Int8Ty, 16);
1159 Builder.CreateAlignedStore(
1160 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty,
Address, 9),
1166 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.
Int8Ty, 12);
1181static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
1183 case X86AVXABILevel::AVX512:
1185 case X86AVXABILevel::AVX:
1187 case X86AVXABILevel::None:
1190 llvm_unreachable(
"Unknown AVXLevel");
1194class X86_64ABIInfo :
public ABIInfo {
1215 static Class merge(Class Accum, Class Field);
1231 void postMerge(
unsigned AggregateSize, Class &Lo, Class &Hi)
const;
1259 void classify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1260 bool isNamedArg,
bool IsRegCall =
false)
const;
1262 llvm::Type *GetByteVectorType(
QualType Ty)
const;
1263 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1264 unsigned IROffset,
QualType SourceTy,
1265 unsigned SourceOffset)
const;
1266 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1267 unsigned IROffset,
QualType SourceTy,
1268 unsigned SourceOffset)
const;
1284 unsigned &neededInt,
unsigned &neededSSE,
1286 bool IsRegCall =
false)
const;
1289 unsigned &NeededSSE,
1290 unsigned &MaxVectorWidth)
const;
1293 unsigned &NeededSSE,
1294 unsigned &MaxVectorWidth)
const;
1296 bool IsIllegalVectorType(
QualType Ty)
const;
1303 bool honorsRevision0_98()
const {
1309 bool classifyIntegerMMXAsSSE()
const {
1311 if (
getContext().getLangOpts().getClangABICompat() <=
1312 LangOptions::ClangABI::Ver3_8)
1316 if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
1322 bool passInt128VectorsInMem()
const {
1324 if (
getContext().getLangOpts().getClangABICompat() <=
1325 LangOptions::ClangABI::Ver9)
1329 return T.isOSLinux() || T.isOSNetBSD();
1335 bool Has64BitPointers;
1339 :
ABIInfo(CGT), AVXLevel(AVXLevel),
1340 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}
1343 unsigned neededInt, neededSSE;
1349 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1350 return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
1362 bool has64BitPointers()
const {
1363 return Has64BitPointers;
1368class WinX86_64ABIInfo :
public ABIInfo {
1371 :
ABIInfo(CGT), AVXLevel(AVXLevel),
1372 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
1381 return isX86VectorTypeForVectorCall(
getContext(), Ty);
1385 uint64_t NumMembers)
const override {
1387 return isX86VectorCallAggregateSmallEnough(NumMembers);
1392 bool IsVectorCall,
bool IsRegCall)
const;
1406 std::make_unique<SwiftABIInfo>(CGT,
true);
1418 llvm::Value *
Address)
const override {
1419 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.
Int8Ty, 8);
1428 StringRef Constraint,
1429 llvm::Type* Ty)
const override {
1430 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1442 bool HasAVXType =
false;
1443 for (CallArgList::const_iterator
1444 it = args.begin(), ie = args.end(); it != ie; ++it) {
1445 if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {
1460 if (GV->isDeclaration())
1462 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1463 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1464 llvm::Function *Fn = cast<llvm::Function>(GV);
1465 Fn->addFnAttr(
"stackrealign");
1480 llvm::StringMap<bool> &CallerMap,
1482 llvm::StringMap<bool> &CalleeMap,
1484 if (CalleeMap.empty() && CallerMap.empty()) {
1495 const llvm::StringMap<bool> &CallerMap,
1496 const llvm::StringMap<bool> &CalleeMap,
1499 bool CallerHasFeat = CallerMap.lookup(Feature);
1500 bool CalleeHasFeat = CalleeMap.lookup(Feature);
1501 if (!CallerHasFeat && !CalleeHasFeat)
1502 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
1503 << IsArgument << Ty << Feature;
1506 if (!CallerHasFeat || !CalleeHasFeat)
1507 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1508 << IsArgument << Ty << Feature;
1517 const llvm::StringMap<bool> &CallerMap,
1518 const llvm::StringMap<bool> &CalleeMap,
1520 bool Caller256 = CallerMap.lookup(
"avx512f") && !CallerMap.lookup(
"evex512");
1521 bool Callee256 = CalleeMap.lookup(
"avx512f") && !CalleeMap.lookup(
"evex512");
1525 if (Caller256 || Callee256)
1526 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1527 << IsArgument << Ty <<
"evex512";
1530 "avx512f", IsArgument);
1535 const llvm::StringMap<bool> &CallerMap,
1536 const llvm::StringMap<bool> &CalleeMap,
QualType Ty,
1550void X86_64TargetCodeGenInfo::checkFunctionCallABI(
1553 llvm::StringMap<bool> CallerMap;
1554 llvm::StringMap<bool> CalleeMap;
1555 unsigned ArgIndex = 0;
1559 for (
const CallArg &Arg : Args) {
1567 if (Arg.getType()->isVectorType() &&
1573 if (ArgIndex < Callee->getNumParams())
1574 Ty =
Callee->getParamDecl(ArgIndex)->getType();
1577 CalleeMap, Ty,
true))
1585 if (
Callee->getReturnType()->isVectorType() &&
1589 CalleeMap,
Callee->getReturnType(),
1598 bool Quote = Lib.contains(
' ');
1599 std::string ArgStr = Quote ?
"\"" :
"";
1601 if (!Lib.ends_with_insensitive(
".lib") && !Lib.ends_with_insensitive(
".a"))
1603 ArgStr += Quote ?
"\"" :
"";
1608class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
1611 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
1612 unsigned NumRegisterParameters)
1613 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
1614 Win32StructABI, NumRegisterParameters,
false) {}
1616 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1619 void getDependentLibraryOption(llvm::StringRef Lib,
1621 Opt =
"/DEFAULTLIB:";
1622 Opt += qualifyWindowsLibrary(Lib);
1625 void getDetectMismatchOption(llvm::StringRef Name,
1626 llvm::StringRef
Value,
1628 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" +
Value.str() +
"\"";
1633void WinX86_32TargetCodeGenInfo::setTargetAttributes(
1635 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
1636 if (GV->isDeclaration())
1638 addStackProbeTargetAttributes(D, GV, CGM);
1648 std::make_unique<SwiftABIInfo>(CGT,
true);
1651 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1659 llvm::Value *
Address)
const override {
1660 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.
Int8Ty, 8);
1668 void getDependentLibraryOption(llvm::StringRef Lib,
1670 Opt =
"/DEFAULTLIB:";
1671 Opt += qualifyWindowsLibrary(Lib);
1674 void getDetectMismatchOption(llvm::StringRef Name,
1675 llvm::StringRef
Value,
1677 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" +
Value.str() +
"\"";
1682void WinX86_64TargetCodeGenInfo::setTargetAttributes(
1685 if (GV->isDeclaration())
1687 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1688 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1689 llvm::Function *Fn = cast<llvm::Function>(GV);
1690 Fn->addFnAttr(
"stackrealign");
1696 addStackProbeTargetAttributes(D, GV, CGM);
1699void X86_64ABIInfo::postMerge(
unsigned AggregateSize,
Class &Lo,
1724 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1726 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1728 if (Hi == SSEUp && Lo != SSE)
1732X86_64ABIInfo::Class X86_64ABIInfo::merge(
Class Accum,
Class Field) {
1756 assert((Accum != Memory && Accum != ComplexX87) &&
1757 "Invalid accumulated classification during merge.");
1758 if (Accum == Field || Field == NoClass)
1760 if (Field == Memory)
1762 if (Accum == NoClass)
1766 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1767 Accum == X87 || Accum == X87Up)
1772void X86_64ABIInfo::classify(
QualType Ty, uint64_t OffsetBase,
Class &Lo,
1773 Class &Hi,
bool isNamedArg,
bool IsRegCall)
const {
1784 Class &Current = OffsetBase < 64 ? Lo : Hi;
1790 if (k == BuiltinType::Void) {
1792 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1795 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1797 }
else if (k == BuiltinType::Float || k == BuiltinType::Double ||
1798 k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
1800 }
else if (k == BuiltinType::LongDouble) {
1801 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1802 if (LDF == &llvm::APFloat::IEEEquad()) {
1805 }
else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
1808 }
else if (LDF == &llvm::APFloat::IEEEdouble()) {
1811 llvm_unreachable(
"unexpected long double representation!");
1820 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
1831 if (Has64BitPointers) {
1838 uint64_t EB_FuncPtr = (OffsetBase) / 64;
1839 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
1840 if (EB_FuncPtr != EB_ThisAdj) {
1854 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
1863 uint64_t EB_Lo = (OffsetBase) / 64;
1867 }
else if (Size == 64) {
1868 QualType ElementType = VT->getElementType();
1877 if (!classifyIntegerMMXAsSSE() &&
1888 if (OffsetBase && OffsetBase != 64)
1890 }
else if (Size == 128 ||
1891 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
1892 QualType ElementType = VT->getElementType();
1895 if (passInt128VectorsInMem() &&
Size != 128 &&
1927 else if (Size <= 128)
1929 }
else if (ET->
isFloat16Type() || ET == getContext().FloatTy ||
1932 }
else if (ET == getContext().DoubleTy) {
1934 }
else if (ET == getContext().LongDoubleTy) {
1935 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1936 if (LDF == &llvm::APFloat::IEEEquad())
1938 else if (LDF == &llvm::APFloat::x87DoubleExtended())
1939 Current = ComplexX87;
1940 else if (LDF == &llvm::APFloat::IEEEdouble())
1943 llvm_unreachable(
"unexpected long double representation!");
1948 uint64_t EB_Real = (OffsetBase) / 64;
1949 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1950 if (Hi == NoClass && EB_Real != EB_Imag)
1957 if (EITy->getNumBits() <= 64)
1959 else if (EITy->getNumBits() <= 128)
1974 if (!IsRegCall && Size > 512)
1981 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1987 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1988 uint64_t ArraySize = AT->getSize().getZExtValue();
1995 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
1998 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1999 Class FieldLo, FieldHi;
2000 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2001 Lo = merge(Lo, FieldLo);
2002 Hi = merge(Hi, FieldHi);
2003 if (Lo == Memory || Hi == Memory)
2007 postMerge(Size, Lo, Hi);
2008 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2038 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2039 for (
const auto &I : CXXRD->bases()) {
2040 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2041 "Unexpected base class!");
2050 Class FieldLo, FieldHi;
2053 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2054 Lo = merge(Lo, FieldLo);
2055 Hi = merge(Hi, FieldHi);
2056 if (Lo == Memory || Hi == Memory) {
2057 postMerge(Size, Lo, Hi);
2065 bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
2067 getContext().getTargetInfo().getTriple().isPS();
2068 bool IsUnion = RT->
isUnionType() && !UseClang11Compat;
2071 i != e; ++i, ++idx) {
2073 bool BitField = i->isBitField();
2076 if (BitField && i->isUnnamedBitfield())
2089 ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
2090 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2092 postMerge(Size, Lo, Hi);
2096 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2098 postMerge(Size, Lo, Hi);
2108 Class FieldLo, FieldHi;
2114 assert(!i->isUnnamedBitfield());
2122 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2127 FieldHi = EB_Hi ?
Integer : NoClass;
2130 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2131 Lo = merge(Lo, FieldLo);
2132 Hi = merge(Hi, FieldHi);
2133 if (Lo == Memory || Hi == Memory)
2137 postMerge(Size, Lo, Hi);
2147 Ty = EnumTy->getDecl()->getIntegerType();
2150 return getNaturalAlignIndirect(Ty);
2156 return getNaturalAlignIndirect(Ty);
2159bool X86_64ABIInfo::IsIllegalVectorType(
QualType Ty)
const {
2162 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2163 if (Size <= 64 || Size > LargestVector)
2165 QualType EltTy = VecTy->getElementType();
2166 if (passInt128VectorsInMem() &&
2176 unsigned freeIntRegs)
const {
2189 Ty = EnumTy->getDecl()->getIntegerType();
2200 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2223 if (freeIntRegs == 0) {
2228 if (Align == 8 && Size <= 64)
2238llvm::Type *X86_64ABIInfo::GetByteVectorType(
QualType Ty)
const {
2244 llvm::Type *IRType = CGT.ConvertType(Ty);
2245 if (isa<llvm::VectorType>(IRType)) {
2248 if (passInt128VectorsInMem() &&
2249 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
2252 return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
2259 if (IRType->getTypeID() == llvm::Type::FP128TyID)
2264 assert((Size == 128 || Size == 256 || Size == 512) &&
"Invalid type found!");
2268 return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2285 if (TySize <= StartBit)
2290 unsigned NumElts = (
unsigned)AT->getSize().getZExtValue();
2293 for (
unsigned i = 0; i != NumElts; ++i) {
2295 unsigned EltOffset = i*EltSize;
2296 if (EltOffset >= EndBit)
break;
2298 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2300 EndBit-EltOffset, Context))
2312 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2313 for (
const auto &I : CXXRD->bases()) {
2314 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2315 "Unexpected base class!");
2321 if (BaseOffset >= EndBit)
continue;
2323 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2325 EndBit-BaseOffset, Context))
2336 i != e; ++i, ++idx) {
2340 if (FieldOffset >= EndBit)
break;
2342 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2358 const llvm::DataLayout &TD) {
2359 if (IROffset == 0 && IRType->isFloatingPointTy())
2363 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2364 if (!STy->getNumContainedTypes())
2367 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2368 unsigned Elt = SL->getElementContainingOffset(IROffset);
2369 IROffset -= SL->getElementOffset(Elt);
2374 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2375 llvm::Type *EltTy = ATy->getElementType();
2376 unsigned EltSize = TD.getTypeAllocSize(EltTy);
2377 IROffset -= IROffset / EltSize * EltSize;
2386llvm::Type *X86_64ABIInfo::
2387GetSSETypeAtOffset(llvm::Type *IRType,
unsigned IROffset,
2388 QualType SourceTy,
unsigned SourceOffset)
const {
2389 const llvm::DataLayout &TD = getDataLayout();
2390 unsigned SourceSize =
2391 (
unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
2393 if (!T0 || T0->isDoubleTy())
2394 return llvm::Type::getDoubleTy(getVMContext());
2397 llvm::Type *T1 =
nullptr;
2398 unsigned T0Size = TD.getTypeAllocSize(T0);
2399 if (SourceSize > T0Size)
2401 if (T1 ==
nullptr) {
2404 if (T0->is16bitFPTy() && SourceSize > 4)
2413 if (T0->isFloatTy() && T1->isFloatTy())
2414 return llvm::FixedVectorType::get(T0, 2);
2416 if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
2417 llvm::Type *T2 =
nullptr;
2421 return llvm::FixedVectorType::get(T0, 2);
2422 return llvm::FixedVectorType::get(T0, 4);
2425 if (T0->is16bitFPTy() || T1->is16bitFPTy())
2426 return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);
2428 return llvm::Type::getDoubleTy(getVMContext());
2446llvm::Type *X86_64ABIInfo::
2447GetINTEGERTypeAtOffset(llvm::Type *IRType,
unsigned IROffset,
2448 QualType SourceTy,
unsigned SourceOffset)
const {
2451 if (IROffset == 0) {
2453 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2454 IRType->isIntegerTy(64))
2463 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2464 IRType->isIntegerTy(32) ||
2465 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2466 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2467 cast<llvm::IntegerType>(IRType)->getBitWidth();
2470 SourceOffset*8+64, getContext()))
2475 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2477 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2478 if (IROffset < SL->getSizeInBytes()) {
2479 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2480 IROffset -= SL->getElementOffset(FieldIdx);
2482 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2483 SourceTy, SourceOffset);
2487 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2488 llvm::Type *EltTy = ATy->getElementType();
2489 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2490 unsigned EltOffset = IROffset/EltSize*EltSize;
2491 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2497 unsigned TySizeInBytes =
2498 (
unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2500 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
2504 return llvm::IntegerType::get(getVMContext(),
2505 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2516 const llvm::DataLayout &TD) {
2521 unsigned LoSize = (
unsigned)TD.getTypeAllocSize(Lo);
2522 llvm::Align HiAlign = TD.getABITypeAlign(Hi);
2523 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
2524 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
2536 if (Lo->isHalfTy() || Lo->isFloatTy())
2537 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2539 assert((Lo->isIntegerTy() || Lo->isPointerTy())
2540 &&
"Invalid/unknown lo type");
2541 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2545 llvm::StructType *
Result = llvm::StructType::get(Lo, Hi);
2548 assert(TD.getStructLayout(
Result)->getElementOffset(1) == 8 &&
2549 "Invalid x86-64 argument pair!");
2554classifyReturnType(
QualType RetTy)
const {
2557 X86_64ABIInfo::Class Lo, Hi;
2558 classify(RetTy, 0, Lo, Hi,
true);
2561 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
2562 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
2564 llvm::Type *ResType =
nullptr;
2571 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
2572 "Unknown missing lo part");
2577 llvm_unreachable(
"Invalid classification for lo word.");
2582 return getIndirectReturnResult(RetTy);
2587 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2591 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2594 RetTy = EnumTy->getDecl()->getIntegerType();
2597 isPromotableIntegerTypeForABI(RetTy))
2605 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2611 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2618 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
2619 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2620 llvm::Type::getX86_FP80Ty(getVMContext()));
2624 llvm::Type *HighPart =
nullptr;
2630 llvm_unreachable(
"Invalid classification for hi word.");
2637 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2642 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2653 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
2654 ResType = GetByteVectorType(RetTy);
2665 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2682X86_64ABIInfo::classifyArgumentType(
QualType Ty,
unsigned freeIntRegs,
2683 unsigned &neededInt,
unsigned &neededSSE,
2684 bool isNamedArg,
bool IsRegCall)
const {
2687 X86_64ABIInfo::Class Lo, Hi;
2688 classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
2692 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
2693 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
2697 llvm::Type *ResType =
nullptr;
2704 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
2705 "Unknown missing lo part");
2718 return getIndirectResult(Ty, freeIntRegs);
2722 llvm_unreachable(
"Invalid classification for lo word.");
2731 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2735 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2738 Ty = EnumTy->getDecl()->getIntegerType();
2741 isPromotableIntegerTypeForABI(Ty))
2751 llvm::Type *IRType = CGT.ConvertType(Ty);
2752 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2758 llvm::Type *HighPart =
nullptr;
2766 llvm_unreachable(
"Invalid classification for hi word.");
2768 case NoClass:
break;
2773 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2783 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2795 assert(Lo == SSE &&
"Unexpected SSEUp classification");
2796 ResType = GetByteVectorType(Ty);
2810X86_64ABIInfo::classifyRegCallStructTypeImpl(
QualType Ty,
unsigned &NeededInt,
2811 unsigned &NeededSSE,
2812 unsigned &MaxVectorWidth)
const {
2814 assert(RT &&
"classifyRegCallStructType only valid with struct types");
2817 return getIndirectReturnResult(Ty);
2820 if (
auto CXXRD = dyn_cast<CXXRecordDecl>(RT->
getDecl())) {
2821 if (CXXRD->isDynamicClass()) {
2822 NeededInt = NeededSSE = 0;
2823 return getIndirectReturnResult(Ty);
2826 for (
const auto &I : CXXRD->bases())
2827 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
2830 NeededInt = NeededSSE = 0;
2831 return getIndirectReturnResult(Ty);
2839 if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
2842 NeededInt = NeededSSE = 0;
2843 return getIndirectReturnResult(Ty);
2846 unsigned LocalNeededInt, LocalNeededSSE;
2850 NeededInt = NeededSSE = 0;
2851 return getIndirectReturnResult(Ty);
2853 if (
const auto *AT = getContext().getAsConstantArrayType(MTy))
2854 MTy = AT->getElementType();
2856 if (getContext().getTypeSize(VT) > MaxVectorWidth)
2857 MaxVectorWidth = getContext().getTypeSize(VT);
2858 NeededInt += LocalNeededInt;
2859 NeededSSE += LocalNeededSSE;
2867X86_64ABIInfo::classifyRegCallStructType(
QualType Ty,
unsigned &NeededInt,
2868 unsigned &NeededSSE,
2869 unsigned &MaxVectorWidth)
const {
2875 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
2886 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
2887 Win64ABIInfo.computeInfo(FI);
2891 bool IsRegCall =
CallingConv == llvm::CallingConv::X86_RegCall;
2894 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
2895 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
2896 unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
2903 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
2904 FreeIntRegs -= NeededInt;
2905 FreeSSERegs -= NeededSSE;
2913 getContext().LongDoubleTy)
2925 else if (NeededSSE && MaxVectorWidth > 0)
2937 it != ie; ++it, ++ArgNo) {
2938 bool IsNamedArg = ArgNo < NumRequiredArgs;
2940 if (IsRegCall && it->type->isStructureOrClassType())
2941 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
2945 NeededSSE, IsNamedArg);
2951 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
2952 FreeIntRegs -= NeededInt;
2953 FreeSSERegs -= NeededSSE;
2957 it->info = getIndirectResult(it->type, FreeIntRegs);
2966 llvm::Value *overflow_arg_area =
2981 llvm::Value *Res = overflow_arg_area;
2989 llvm::Value *Offset =
2990 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
2992 Offset,
"overflow_arg_area.next");
2996 return Address(Res, LTy, Align);
3008 unsigned neededInt, neededSSE;
3016 if (!neededInt && !neededSSE)
3030 llvm::Value *InRegs =
nullptr;
3032 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3036 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3037 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3043 llvm::Value *FitsInFP =
3044 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3045 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3046 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3052 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3073 if (neededInt && neededSSE) {
3075 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3079 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3080 llvm::Type *TyLo = ST->getElementType(0);
3081 llvm::Type *TyHi = ST->getElementType(1);
3082 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3083 "Unexpected ABI info for mixed regs");
3084 llvm::Value *GPAddr =
3086 llvm::Value *FPAddr =
3088 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3089 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3105 }
else if (neededInt) {
3110 auto TInfo = getContext().getTypeInfoInChars(Ty);
3111 uint64_t TySize = TInfo.Width.getQuantity();
3122 }
else if (neededSSE == 1) {
3126 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3159 llvm::Value *Offset = llvm::ConstantInt::get(CGF.
Int32Ty, neededInt * 8);
3164 llvm::Value *Offset = llvm::ConstantInt::get(CGF.
Int32Ty, neededSSE * 16);
3187 uint64_t Width = getContext().getTypeSize(Ty);
3188 bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3196ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
3202 isHomogeneousAggregate(Ty,
Base, NumElts) && FreeSSERegs >= NumElts) {
3203 FreeSSERegs -= NumElts;
3204 return getDirectX86Hva();
3210 bool IsReturnType,
bool IsVectorCall,
3211 bool IsRegCall)
const {
3217 Ty = EnumTy->getDecl()->getIntegerType();
3219 TypeInfo Info = getContext().getTypeInfo(Ty);
3225 if (!IsReturnType) {
3231 return getNaturalAlignIndirect(Ty,
false);
3239 if ((IsVectorCall || IsRegCall) &&
3240 isHomogeneousAggregate(Ty,
Base, NumElts)) {
3242 if (FreeSSERegs >= NumElts) {
3243 FreeSSERegs -= NumElts;
3249 }
else if (IsVectorCall) {
3250 if (FreeSSERegs >= NumElts &&
3252 FreeSSERegs -= NumElts;
3254 }
else if (IsReturnType) {
3266 llvm::Type *LLTy = CGT.ConvertType(Ty);
3267 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3274 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3275 return getNaturalAlignIndirect(Ty,
false);
3282 switch (BT->getKind()) {
3283 case BuiltinType::Bool:
3288 case BuiltinType::LongDouble:
3292 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3293 if (LDF == &llvm::APFloat::x87DoubleExtended())
3298 case BuiltinType::Int128:
3299 case BuiltinType::UInt128:
3309 llvm::Type::getInt64Ty(getVMContext()), 2));
3332 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
3333 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
3337 if (CC == llvm::CallingConv::X86_64_SysV) {
3338 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
3339 SysVABIInfo.computeInfo(FI);
3343 unsigned FreeSSERegs = 0;
3347 }
else if (IsRegCall) {
3354 IsVectorCall, IsRegCall);
3359 }
else if (IsRegCall) {
3364 unsigned ArgNum = 0;
3365 unsigned ZeroSSERegs = 0;
3370 unsigned *MaybeFreeSSERegs =
3371 (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
3373 classify(I.
type, *MaybeFreeSSERegs,
false, IsVectorCall, IsRegCall);
3381 I.
info = reclassifyHvaArgForVectorCall(I.
type, FreeSSERegs, I.
info);
3389 uint64_t Width = getContext().getTypeSize(Ty);
3390 bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3399 CodeGenModule &CGM,
bool DarwinVectorABI,
bool Win32StructABI,
3400 unsigned NumRegisterParameters,
bool SoftFloatABI) {
3401 bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3403 return std::make_unique<X86_32TargetCodeGenInfo>(
3404 CGM.
getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3405 NumRegisterParameters, SoftFloatABI);
3409 CodeGenModule &CGM,
bool DarwinVectorABI,
bool Win32StructABI,
3410 unsigned NumRegisterParameters) {
3411 bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3413 return std::make_unique<WinX86_32TargetCodeGenInfo>(
3414 CGM.
getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3415 NumRegisterParameters);
3418std::unique_ptr<TargetCodeGenInfo>
3421 return std::make_unique<X86_64TargetCodeGenInfo>(CGM.
getTypes(), AVXLevel);
3424std::unique_ptr<TargetCodeGenInfo>
3427 return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.
getTypes(), AVXLevel);
static bool checkAVX512ParamFeature(DiagnosticsEngine &Diag, SourceLocation CallLoc, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, bool IsArgument)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static void initFeatureMaps(const ASTContext &Ctx, llvm::StringMap< bool > &CallerMap, const FunctionDecl *Caller, llvm::StringMap< bool > &CalleeMap, const FunctionDecl *Callee)
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, SourceLocation CallLoc, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, bool IsArgument)
static bool checkAVXParamFeature(DiagnosticsEngine &Diag, SourceLocation CallLoc, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, StringRef Feature, bool IsArgument)
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
static llvm::Type * getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
getFPTypeAtOffset - Return a floating point type at the specified offset.
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM)
static bool isArgInAlloca(const ABIArgInfo &Info)
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
TypeInfoChars getTypeInfoInChars(const Type *T) const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
const TargetInfo & getTargetInfo() const
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getRequiredAlignment() const
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
A fixed int type of a specified bitwidth.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
Represents a C++ struct/union/class.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits One()
One - Construct a CharUnits quantity of one.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
bool getIndirectByVal() const
static ABIArgInfo getInAlloca(unsigned FieldIndex, bool Indirect=false)
static ABIArgInfo getIgnore()
static ABIArgInfo getExpand()
void setIndirectAlign(CharUnits IA)
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
bool canHaveCoerceToType() const
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
ASTContext & getContext() const
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of.
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
const TargetInfo & getTarget() const
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Address CreateGEP(Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
RecordArgABI
Specify how one should pass an argument of a record type.
@ RAA_Indirect
Pass it as a pointer to temporary memory.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
CGFunctionInfo - Class to encapsulate the information about a function definition.
ABIArgInfo & getReturnInfo()
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
const_arg_iterator arg_begin() const
unsigned getRegParm() const
CanQualType getReturnType() const
bool getHasRegParm() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
unsigned getMaxVectorWidth() const
Return the maximum vector width in the arguments.
unsigned getNumRequiredArgs() const
void setMaxVectorWidth(unsigned Width)
Set the maximum vector width in the arguments.
CallArgList - Type for representing both the value and type of arguments in a call.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
ASTContext & getContext() const
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
llvm::LLVMContext & getLLVMContext()
This class organizes the cross-function state that is used while generating LLVM code.
DiagnosticsEngine & getDiags() const
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::Triple & getTriple() const
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue references.
Address getAddress(CodeGenFunction &CGF) const
void setAddress(Address address)
A class for recording the number of arguments that a function signature requires.
Target specific hooks for defining how a type should be passed or returned from functions with one of...
bool occupiesMoreThan(ArrayRef< llvm::Type * > scalarTypes, unsigned maxAllRegisters) const
Does the given lowering require more than the given number of registers when expanded?
virtual bool shouldPassIndirectly(ArrayRef< llvm::Type * > ComponentTys, bool AsReturnValue) const
Returns true if an aggregate which expands to the given type sequence should be passed / returned ind...
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type * > &ResultRegTypes, std::vector< llvm::Type * > &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
virtual llvm::Type * adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, StringRef Constraint, llvm::Type *Ty) const
Corrects the low-level LLVM type for a given constraint and "usual" type.
virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const
Retrieve the address of a function to call immediately before calling objc_retainAutoreleasedReturnVa...
virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Initializes the given DWARF EH register-size table, a char*.
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
static std::string qualifyWindowsLibrary(StringRef Lib)
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
virtual bool markARCOptimizedReturnCallsAsNoTail() const
Determine whether a call to objc_retainAutoreleasedReturnValue or objc_unsafeClaimAutoreleasedReturnV...
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
QualType getElementType() const
Represents the canonical version of C arrays with a specified constant size.
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Decl - This represents one declaration (or definition), e.g.
Concrete class used by the front-end to report problems and issues.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Represents a function declaration or definition.
const ParmVarDecl * getParamDecl(unsigned i) const
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
CallingConv getCallConv() const
@ Ver11
Attempt to be ABI-compatible with code generated by Clang 11.0.x (git 2e10b7a39b93).
A (possibly-)qualified type.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
QualType getCanonicalType() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getDecl() const
Encodes a location in the source.
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
const llvm::fltSemantics & getLongDoubleFormat() const
The base class of the type hierarchy.
bool isBlockPointerType() const
bool isFloat16Type() const
bool isPointerType() const
bool isReferenceType() const
bool isEnumeralType() const
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
bool isBitIntType() const
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
bool isBuiltinType() const
Helper methods to distinguish type categories.
bool isAnyComplexType() const
bool isMemberPointerType() const
bool isBFloat16Type() const
bool isMemberFunctionPointerType() const
bool isVectorType() const
const T * getAs() const
Member-template getAs<specific type>.
bool isRecordType() const
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
Represents a GCC generic vector type.
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
std::unique_ptr< TargetCodeGenInfo > createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
std::unique_ptr< TargetCodeGenInfo > createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters)
bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty)
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
X86AVXABILevel
The AVX ABI level for X86 targets.
bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyField - Return true iff a the field is "empty", that is it is an unnamed bit-field or an (arra...
llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
bool isAggregateTypeForABI(QualType T)
const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters, bool SoftFloatABI)
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
std::unique_ptr< TargetCodeGenInfo > createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool isSIMDVectorType(ASTContext &Context, QualType Ty)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Ret(InterpState &S, CodePtr &PC, APValue &Result)
@ Result
The result type of a method or function.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
@ Class
The "class" keyword introduces the elaborated-type-specifier.
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * Int32Ty