#include "TargetInfo.h"
#include "llvm/ADT/SmallBitVector.h"
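
// ABI classification and argument lowering for x86 (i386) and x86-64,
// covering the SysV AMD64 ABI as well as the Windows x64, fastcall,
// vectorcall, and regcall conventions.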

bool IsX86_MMXType(llvm::Type *IRType) {
  // An MMX value is a 64-bit vector whose elements are narrower than 64 bits.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         IRType->getScalarSizeInBits() != 64;
}

static llvm::Type *X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type *Ty) {
  // ...

  // AVX-512 mask registers ('k' constraint) are modeled as vectors of i1.
  if (Constraint == "k") {
    llvm::Type *Int1Ty = llvm::Type::getInt1Ty(CGF.getLLVMContext());
    return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
  }

  return Ty;
}

static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        // Excluded when long double is the x87 80-bit extended format.
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed as a
/// vectorcall/regcall homogeneous vector aggregate.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}
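
// For example, under vectorcall a struct holding four __m128 members is a
// homogeneous vector aggregate (4 <= 4 members of a legal vector type); a
// fifth member would disqualify it.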

static ABIArgInfo getDirectX86Hva(llvm::Type *T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

/// Per-call register accounting used while classifying x86-32 arguments.
struct CCState {
  CCState(CGFunctionInfo &FI)
      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
        Required(FI.getRequiredArgs()), IsDelegateCall(FI.isDelegateCall()) {}

  llvm::SmallBitVector IsPreassigned;
  unsigned CC = CallingConv::CC_C;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;
  RequiredArgs Required;
  bool IsDelegateCall = false;
};
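
// The classifiers below decrement FreeRegs/FreeSSERegs as pieces of arguments
// are assigned; e.g. fastcall starts with two free GPRs (ECX and EDX) and
// three SSE registers, as set up in computeInfo below.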

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  bool IsLinuxABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State,
                                  unsigned ArgIndex) const;

  /// Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// Rewrite the function info so that all memory arguments use inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

public:
  void computeInfo(CGFunctionInfo &FI) const override;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
                   CGT.getTarget().getTriple().isOSCygMing()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}
};

class X86_32SwiftABIInfo : public SwiftABIInfo {
public:
  explicit X86_32SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/false) {}

  bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
                            bool AsReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers.
    return occupiesMoreThan(ComponentTys, /*total=*/3);
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {
    SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
  }

  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
                                     const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin())
      return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
};
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    // Copy the leading text and any '$$' pairs verbatim.
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {
        OS << '{';
        ++DigitStart;
      }
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(Buf);
}
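
// Example: rewriteInputConstraintReferences(1, 1, S) rewrites "mov $1, $0"
// to "mov $2, $0" -- the input reference $1 is shifted past the newly added
// output, while $0 (an existing output) is left alone.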

void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX register for values up to 32 bits, and EAX:EDX otherwise.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(ReturnSlot.getAddress().withElementType(CoerceTy));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABIs).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized; for the MCU ABI, it only needs
  // to fit in at most 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in registers.
    if (Size == 64 || Size == 128)
      return false;
    return true;
  }

  // ... (builtins, pointers, enums, complex and member pointers are OK)

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise this must be a record type; it is returned in registers only if
  // every field would be.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
  for (const auto *FD : RD->fields()) {
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as their element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // ... (only simple scalar types without padding qualify)

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4-byte alignment on x86; if the
    // argument is smaller than 32 bits, expanding the struct would create
    // alignment padding, so only 32/64-bit basic types are allowed.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // Reject bit-fields wholesale.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
  uint64_t Size = 0;
  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}
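
// For instance, struct { int a; int b; } (8 bytes, no padding) can be
// expanded into two 4-byte stack slots, whereas struct { char c; } cannot:
// char is not a 32/64-bit basic type, so its 4-byte stack slot would not
// match the struct's layout.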

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, the hidden sret argument consumes one
  // integer register.
  if (State.CC != llvm::CallingConv::X86_FastCall &&
      State.CC != llvm::CallingConv::X86_VectorCall && State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(
      RetTy, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // Homogeneous vector aggregates are returned in SSE registers.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in registers
      // as <2 x i64> so the backend accepts the type.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getInt64Ty(getVMContext()), 2));

      // Return in a register if the vector fits in a GPR, or if it is
      // 64 bits wide with a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial copy/destroy semantics are always indirect,
    // as are all records unless small structs are returned in registers.
    if (isRecordReturnIndirect(RetTy, getCXXABI()))
      return getIndirectReturnResult(RetTy, State);
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // A complex _Float16 is returned as a <2 x half> vector.
    if (const ComplexType *CT = RetTy->getAs<ComplexType>()) {
      QualType ET = getContext().getCanonicalType(CT->getElementType());
      if (ET->isFloat16Type())
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getHalfTy(getVMContext()), 2));
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      // A single-element struct whose field is a float or double (outside
      // the MSVC ABI) or has pointer representation is returned directly
      // as that type.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // Otherwise coerce to a register-sized integer.
      uint64_t Size = getContext().getTypeSize(RetTy);
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const auto *ED = RetTy->getAsEnumDecl())
    RetTy = ED->getIntegerType();

  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() > 64)
      return getIndirectReturnResult(RetTy, State);

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is at most the minimum ABI alignment, just use the
  // default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  if (IsLinuxABI) {
    // If the vector type is __m128/__m256/__m512, keep the default alignment.
    if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
      return Align;
  }

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // ... (Darwin: SIMD vector types and records containing them get 16)

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4),
                                   getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 getDataLayout().getAllocaAddrSpace(), true,
                                 Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }

  return Integer;
}

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also, it does not
    // allow passing >8-byte structs in-register, even if there are still
    // free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}
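
// Example: with -mregparm=3, an 8-byte struct needs SizeInRegs == 2 of the
// three GPRs; a 16-byte struct (4 registers) exceeds the budget, clears
// State.FreeRegs, and is passed on the stack instead.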

bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogeneous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty,
                                            CCState &State) const {
  bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
                    (Ty->isIntegralOrEnumerationType() ||
                     Ty->isPointerType() || Ty->isReferenceType());

  if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
                      State.CC == llvm::CallingConv::X86_VectorCall))
    return false;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
    return false;

  // Return true to apply inreg to all legal parameters except for MCU targets.
  return !IsMCUABI;
}
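
// vectorcall on x86 assigns registers in two passes: vector and FP arguments
// (but not HVAs) are handed XMM/YMM0-5 first, in the order they appear; HVAs
// are then reclassified out of whatever SSE registers remain.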

void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI,
                                           CCState &State) const {
  // First pass: assign all vector-type (non-HVA) arguments to [XYZ]MM0-5;
  // everything else waits for the second pass in computeInfo.
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
  for (int I = 0, E = Args.size(); I < E; ++I) {
    const Type *Base = nullptr;
    uint64_t NumElts = 0;
    const QualType &Ty = Args[I].type;
    if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
        isHomogeneousAggregate(Ty, Base, NumElts)) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        Args[I].info = ABIArgInfo::getDirectInReg();
        State.IsPreassigned.set(I);
      }
    }
  }
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
                                               unsigned ArgIndex) const {
  // FIXME: Set alignment on indirect arguments.
  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;

  Ty = useFirstFieldIfTransparentUnion(Ty);
  TypeInfo TI = getContext().getTypeInfo(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (State.IsDelegateCall) {
      // Avoid having different alignments on delegate call arguments by
      // always setting the alignment to 4, as we do for inallocas.
      ABIArgInfo Res = getIndirectResult(Ty, /*ByVal=*/false, State);
      Res.setIndirectAlign(CharUnits::fromQuantity(4));
      return Res;
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // Regcall and vectorcall use the concept of a homogeneous vector aggregate.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((IsRegCall || IsVectorCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;

      // Vectorcall passes HVAs directly and does not flatten them, but
      // regcall does.
      if (IsVectorCall)
        return getDirectX86Hva();

      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    // FIXME: This should not be byval!
    if (RT &&
        RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, /*ByVal=*/true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (TI.Width + 31) / 32;
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Pass over-aligned aggregates to non-variadic functions on Windows
    // indirectly.
    if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
      unsigned AlignInBits = 0;
      if (RT) {
        const ASTRecordLayout &Layout =
            getContext().getASTRecordLayout(RT->getOriginalDecl());
        AlignInBits = getContext().toBits(Layout.getRequiredAlignment());
      } else if (TI.isAlignRequired()) {
        AlignInBits = TI.Align;
      }
      if (AlignInBits > 32)
        return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // Expand small (<= 128-bit) record types when we know the stack layout
    // of those arguments will match the struct.
    if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
        canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          IsFastCall || IsVectorCall || IsRegCall, PaddingType);

    return getIndirectResult(Ty, /*ByVal=*/true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Windows, vectors are passed directly if registers are available, or
    // indirectly if not.
    if (IsWin32StructABI) {
      if (TI.Width <= 512 && State.FreeSSERegs > 0) {
        --State.FreeSSERegs;
        return ABIArgInfo::getDirectInReg();
      }
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // On Darwin, small vectors are passed as integers.
    if (IsDarwinVectorABI) {
      if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
          (TI.Width == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), TI.Width));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const auto *ED = Ty->getAsEnumDecl())
    Ty = ED->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (isPromotableIntegerTypeForABI(Ty)) {
    if (InReg)
      return ABIArgInfo::getExtendInReg(Ty);
    return ABIArgInfo::getExtend(Ty);
  }

  if (const auto *EIT = Ty->getAs<BitIntType>()) {
    if (EIT->getNumBits() <= 64) {
      if (InReg)
        return ABIArgInfo::getDirectInReg();
      return ABIArgInfo::getDirect();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI);
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 3;
  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeRegs = 5;
    State.FreeSSERegs = 8;
  } else if (IsWin32StructABI) {
    // Since MSVC 2015, the first three SSE vectors have been passed in
    // registers. The rest are passed indirectly.
    State.FreeRegs = DefaultNumRegisterParameters;
    State.FreeSSERegs = 3;
  } else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  // For vectorcall, do a first pass over the arguments, assigning FP and
  // vector arguments to their free registers.
  if (State.CC == llvm::CallingConv::X86_VectorCall)
    runVectorCallFirstPass(FI, State);

  bool UsedInAlloca = false;
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    // Skip arguments that have already been assigned.
    if (State.IsPreassigned.test(I))
      continue;

    Args[I].info = classifyArgumentType(Args[I].type, State, I);
    UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and
  // rewrite all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
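
// The inalloca rewrite has to be a second pass: only once every argument is
// classified do we know whether any of them requires inalloca, and in that
// case all memory arguments must be laid out together in one packed argument
// struct.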

void X86_32ABIInfo::addFieldToArgStruct(
    SmallVector<llvm::Type *, 6> &FrameFields, CharUnits &StackOffset,
    ABIArgInfo &Info, QualType Type) const {
  // Arguments are always 4-byte-aligned.
  CharUnits WordSize = CharUnits::fromQuantity(4);
  assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");

  // sret pointers and indirect things will require an extra pointer
  // indirection, unless they are byval. Most things are byval, and will not
  // require this indirection.
  bool IsIndirect = false;
  if (Info.isIndirect() && !Info.getIndirectByVal())
    IsIndirect = true;
  Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
  llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
  if (IsIndirect)
    LLTy = llvm::PointerType::getUnqual(getVMContext());
  FrameFields.push_back(LLTy);
  StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);

  // Insert padding bytes to respect alignment.
  CharUnits FieldEnd = StackOffset;
  StackOffset = FieldEnd.alignTo(WordSize);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  }
}

static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Ignore:
  case ABIArgInfo::IndirectAliased:
    return false;
  case ABIArgInfo::Indirect:
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
    return !Info.getInReg();
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
    // These are aggregate types which are never passed in registers when
    // inalloca is involved.
    return true;
  // ...
  }
  llvm_unreachable("invalid enum");
}

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  // The stack alignment is always 4.
  CharUnits StackAlign = CharUnits::fromQuantity(4);

  CharUnits StackOffset;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true),
                  StackAlign);
}

RValue X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  CCState State(*const_cast<CGFunctionInfo *>(CGF.CurFnInfo));
  ABIArgInfo AI = classifyArgumentType(Ty, State, /*ArgIndex=*/0);
  // Empty records are ignored for parameter passing purposes.
  if (AI.isIgnore())
    return Slot.asRValue();

  // x86-32 changes the alignment of certain arguments on the stack.
  // Just messing with TypeInfo like this works because we never pass
  // anything indirectly.
  TypeInfo.Align = CharUnits::fromQuantity(
      getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false, TypeInfo,
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/true, Slot);
}

bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}

static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
                                 CodeGen::CodeGenModule &CGM) {
  if (!FD->hasAttr<AnyX86InterruptAttr>())
    return;

  llvm::Function *Fn = cast<llvm::Function>(GV);
  Fn->setCallingConv(llvm::CallingConv::X86_INTR);
  if (FD->getNumParams() == 0)
    return;

  // Mark the first parameter (the interrupt frame pointer) as byval.
  auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
  llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
  llvm::Attribute NewAttr =
      llvm::Attribute::getWithByValType(Fn->getContext(), ByValTy);
  Fn->addParamAttr(0, NewAttr);
}

void X86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }

    addX86InterruptAttrs(FD, GV, CGM);
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different on Darwin
  // (for EH), but the range is the same. 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). These have size 16, which is sizeof(long double)
    // on platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some reason.
    Builder.CreateAlignedStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
        CharUnits::One());

    // 11-16 are st(0..5). These have size 12, which is sizeof(long double)
    // on platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

/// Returns the largest legal vector size, in bits, for the given AVX level.
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}

/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// Implement the AMD64-ABI post-merger cleanup (3.2.3p2 rule 5).
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// Implement the AMD64-ABI merge algorithm (3.2.3p2 rule 4).
  static Class merge(Class Accum, Class Field);

  /// Determine the x86-64 register classes in which the given type T should
  /// be passed or returned.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg, bool IsRegCall = false) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// Given a source type \arg Ty, return a suitable result such that the
  /// value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// Given a source type \arg Ty, return a suitable result such that the
  /// argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                  unsigned &neededInt, unsigned &neededSSE,
                                  bool isNamedArg,
                                  bool IsRegCall = false) const;

  ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                       unsigned &NeededSSE,
                                       unsigned &MaxVectorWidth) const;

  ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                           unsigned &NeededSSE,
                                           unsigned &MaxVectorWidth) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities, unfortunately in
  /// ways that were not always consistent with certain previous compilers.
  /// In particular, platforms which required strict binary compatibility with
  /// older versions of GCC may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
  /// classify it as INTEGER (for compatibility with older clang compilers).
  bool classifyIntegerMMXAsSSE() const {
    // Clang <= 3.8 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver3_8)
      return false;

    const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
      return false;
    return true;
  }

  // GCC classifies vectors of __int128 as memory.
  bool passInt128VectorsInMem() const {
    // Clang <= 9.0 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver9)
      return false;

    const llvm::Triple &T = getTarget().getTriple();
    return T.isOSLinux() || T.isOSNetBSD();
  }

  bool returnCXXRecordGreaterThan128InMem() const {
    // Clang <= 20.0 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver20)
      return false;
    return true;
  }

  X86AVXABILevel AVXLevel;
  // Some ABIs (e.g. X32 ABI and Native Client SFI ABI) use 32-bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg=*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
  RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                     AggValueSlot Slot) const override;

  bool has64BitPointers() const { return Has64BitPointers; }
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {
public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

private:
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
                      bool IsVectorCall, bool IsRegCall) const;
  ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
                                           const ABIArgInfo &current) const;

  X86AVXABILevel AVXLevel;

  bool IsMingw64;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  /// Disable tail call on x86-64. The epilogue code before the tail jump
  /// blocks autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV
  /// optimizations.
  bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE registers used,
    // and GCC sets this when calling an unprototyped function, so we override
    // the default behavior. However, don't do that when AVX types are
    // involved: the ABI explicitly states it is undefined, and it can lead to
    // issues such as needing an extra level of indirection in the backend.
    bool HasAVXType = false;
    for (const CallArg &arg : args) {
      if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(arg.Ty)) {
        HasAVXType = true;
        break;
      }
    }

    if (!HasAVXType)
      return true;

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->addFnAttr("stackrealign");
      }

      addX86InterruptAttrs(FD, GV, CGM);
    }
  }

  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee,
                            const CallArgList &Args,
                            QualType ReturnType) const override;
};

static void initFeatureMaps(const ASTContext &Ctx,
                            llvm::StringMap<bool> &CallerMap,
                            const FunctionDecl *Caller,
                            llvm::StringMap<bool> &CalleeMap,
                            const FunctionDecl *Callee) {
  if (CalleeMap.empty() && CallerMap.empty()) {
    // The caller is potentially nullptr in the case where the call isn't in a
    // function. In this case, getFunctionFeatureMap returns the TU-level
    // settings.
    Ctx.getFunctionFeatureMap(CallerMap, Caller);
    if (Callee)
      Ctx.getFunctionFeatureMap(CalleeMap, Callee);
  }
}

static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
                                 SourceLocation CallLoc,
                                 const FunctionDecl &Callee,
                                 const llvm::StringMap<bool> &CallerMap,
                                 const llvm::StringMap<bool> &CalleeMap,
                                 QualType Ty, StringRef Feature,
                                 bool IsArgument) {
  bool CallerHasFeat = CallerMap.lookup(Feature);
  bool CalleeHasFeat = CalleeMap.lookup(Feature);

  // No diagnostic if the callee isn't externally visible or carries
  // always_inline: the call will be inlined, or the caller will fail to link.
  if (!CallerHasFeat && !CalleeHasFeat &&
      (!Callee.isExternallyVisible() || Callee.hasAttr<AlwaysInlineAttr>()))
    return false;

  if (!CallerHasFeat && !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
           << IsArgument << Ty << Feature;

  // Mixing calling conventions here is very clearly an error.
  if (!CallerHasFeat || !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::err_avx_calling_convention)
           << IsArgument << Ty << Feature;

  // Both caller and callee have the required feature; nothing to diagnose.
  return false;
}

static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
                          SourceLocation CallLoc, const FunctionDecl &Callee,
                          const llvm::StringMap<bool> &CallerMap,
                          const llvm::StringMap<bool> &CalleeMap, QualType Ty,
                          bool IsArgument) {
  uint64_t Size = Ctx.getTypeSize(Ty);
  if (Size > 256)
    return checkAVXParamFeature(Diag, CallLoc, Callee, CallerMap, CalleeMap,
                                Ty, "avx512f", IsArgument);

  if (Size > 128)
    return checkAVXParamFeature(Diag, CallLoc, Callee, CallerMap, CalleeMap,
                                Ty, "avx", IsArgument);

  return false;
}

void X86_64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
                                                   SourceLocation CallLoc,
                                                   const FunctionDecl *Caller,
                                                   const FunctionDecl *Callee,
                                                   const CallArgList &Args,
                                                   QualType ReturnType) const {
  if (!Callee)
    return;

  llvm::StringMap<bool> CallerMap;
  llvm::StringMap<bool> CalleeMap;
  unsigned ArgIndex = 0;

  // We need to loop through the actual call arguments rather than the
  // function's parameters, in case this is variadic.
  for (const CallArg &Arg : Args) {
    // The "avx" feature changes how vectors >128 bits are passed; "avx512f"
    // additionally changes how vectors >256 bits are passed. Like GCC, warn
    // when a function is called with an argument where this matters, and
    // error on an obvious ABI mismatch.
    if (Arg.getType()->isVectorType() &&
        CGM.getContext().getTypeSize(Arg.getType()) > 128) {
      initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
      QualType Ty = Arg.getType();
      // The CallArg has desugared the type already, so for clearer
      // diagnostics, replace it with the type in the FunctionDecl if possible.
      if (ArgIndex < Callee->getNumParams())
        Ty = Callee->getParamDecl(ArgIndex)->getType();

      if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, *Callee,
                        CallerMap, CalleeMap, Ty, /*IsArgument=*/true))
        return;
    }
    ++ArgIndex;
  }

  // Check return always, as we don't have a good way of knowing in codegen
  // whether this value is used, tail-called, etc.
  if (Callee->getReturnType()->isVectorType() &&
      CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
    initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
    checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, *Callee,
                  CallerMap, CalleeMap, Callee->getReturnType(),
                  /*IsArgument=*/false);
  }
}

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument contains a space, enclose it in quotes.
  bool Quote = Lib.contains(' ');
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.ends_with_insensitive(".lib") && !Lib.ends_with_insensitive(".a"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}

class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                             bool RetSmallStructInRegABI, bool Win32StructABI,
                             unsigned NumRegisterParameters)
      : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
                                Win32StructABI, NumRegisterParameters,
                                /*SoftFloatABI=*/false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WinX86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WinX86_64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }

    addX86InterruptAttrs(FD, GV, CGM);
  }

  addStackProbeTargetAttributes(D, GV, CGM);
}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5, post-merger cleanup. Some of these are
  // enforced by the merging logic; others can arise only with unions.
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is classified
  // recursively, and the resulting classes of its eightbytes are combined
  // pairwise by these rules.
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
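
// Worked example: struct { double d; int a; int b; } occupies two eightbytes.
// The first classifies as SSE (the double) and the second as INTEGER (the two
// ints), so the struct is passed in one XMM register plus one GPR.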

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
                             Class &Hi, bool isNamedArg,
                             bool IsRegCall) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.
  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
               k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
      Current = SSE;
    } else if (k == BuiltinType::Float128) {
      Lo = SSE;
      Hi = SSEUp;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad()) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
    return;
  }

  // Classify enums as their underlying integer type.
  if (const auto *ED = Ty->getAsEnumDecl()) {
    classify(ED->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->isPointerType()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType()) {
      if (Has64BitPointers) {
        // With 64-bit pointers this is an {i64, i64}, so classify both
        // Lo and Hi now.
        Lo = Hi = Integer;
      } else {
        // With 32-bit pointers this is an {i32, i32}. If that straddles an
        // eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj)
          Lo = Hi = Integer;
        else
          Current = Integer;
      }
    } else {
      Current = Integer;
    }
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
      // gcc passes tiny vectors (<= 4 bytes) as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be split.
      uint64_t EB_Lo = (OffsetBase) / 64;
      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
      if (EB_Lo != EB_Hi)
        Hi = Lo;
    } else if (Size == 64) {
      QualType ElementType = VT->getElementType();

      // gcc passes <1 x double> in memory.
      if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as SSE, but clang historically passed it
      // as integer; keep integer where clang is the platform compiler.
      if (!classifyIntegerMMXAsSSE() &&
          (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULongLong)))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      QualType ElementType = VT->getElementType();

      // gcc passes 256- and 512-bit <X x __int128> vectors in memory.
      if (passInt128VectorsInMem() && Size != 128 &&
          (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
           ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
        return;

      // Split into SSE for the first eightbyte plus SSEUp for the rest.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||
               ET->isBFloat16Type()) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad())
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble())
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const auto *EITy = Ty->getAs<BitIntType>()) {
    if (EITy->getNumBits() <= 64)
      Current = Integer;
    else if (EITy->getNumBits() <= 128)
      Lo = Hi = Integer;
    // Larger values need to get passed in memory.
    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. Objects larger than eight eightbytes have
    // class MEMORY. The regcall ABI lifts this limit; only the free-register
    // check in computeInfo applies there.
    if (!IsRegCall && Size > 512)
      return;

    // Rule 1 also sends unaligned fields to MEMORY.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement a simplified merge over the elements.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getZExtSize();

    // Wide vectors are only allowed when the array wraps exactly one
    // 256/512-bit element; Lo/Hi tracking does not extend past 128 bits.
    if (Size > 128 &&
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
      return;

    for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
         ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // Rule 1: objects larger than eight eightbytes have class MEMORY, as do
    // C++ objects with a non-trivial copy constructor or destructor (they are
    // passed by invisible reference).
    if (Size > 512)
      return;
    if (getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base = I.getType()->castAsCXXRecordDecl();

        // Classify this base at its offset and merge into the result.
        Class FieldLo, FieldHi;
        uint64_t Offset =
            OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);

        if (returnCXXRecordGreaterThan128InMem() &&
            (Size > 128 && (Size != getContext().getTypeSize(I.getType()) ||
                            Size > getNativeVectorSizeForAVXABI(AVXLevel)))) {
          // A wide vector is only usable when the record consists of a single
          // 256/512-bit element; otherwise force MEMORY.
          Lo = Memory;
        }

        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
                                LangOptions::ClangABI::Ver11 ||
                            getContext().getTargetInfo().getTriple().isPS();
    bool IsUnion = RT->isUnionType() && !UseClang11Compat;

    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // Ignore padding bit-fields.
      if (BitField && i->isUnnamedBitField())
        continue;

      // Rule 1 again: records wider than 128 bits go to MEMORY unless they
      // wrap a single wide vector (unions historically used wider logic).
      if (Size > 128 &&
          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      bool IsInMemory =
          Offset % getContext().getTypeAlign(i->getType().getCanonicalType());
      // Unaligned fields force MEMORY, except bit-fields, which may straddle
      // an eightbyte without penalty.
      if (!BitField && IsInMemory) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // Classify this field and merge it into the running result. Bit-fields
      // require special handling: they never force the structure into memory
      // and may straddle an eightbyte.
      Class FieldLo, FieldHi;
      if (BitField) {
        assert(!i->isUnnamedBitField());
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const auto *ED = Ty->getAsEnumDecl())
      Ty = ED->getIntegerType();

    if (Ty->isBitIntType())
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&
        (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
         EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
      return true;
  }

  return false;
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the
  // right place naturally.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
      !Ty->isBitIntType()) {
    // Treat an enum type as its underlying type.
    if (const auto *ED = Ty->getAsEnumDecl())
      Ty = ED->getIntegerType();

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment, so the mid-level optimizer knows the
  // alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible;
  // this is important for good codegen.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align),
                                 getDataLayout().getAllocaAddrSpace());
}

llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType)) {
    // Don't pass vXi128 vectors in their native type, the backend can't
    // legalize them.
    if (passInt128VectorsInMem() &&
        cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
      // Use a vXi64 vector.
      uint64_t Size = getContext().getTypeSize(Ty);
      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
                                        Size / 64);
    }

    return IRType;
  }

  if (IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  // Return a LLVM IR vector type based on the size of 'Ty'.
  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                                    Size / 64);
}

static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getZExtSize();

    // Check each element to see if it overlaps the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i * EltSize;
      if (EltOffset >= EndBit)
        break;

      unsigned EltStart = EltOffset < StartBit ? StartBit - EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit - EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base = I.getType()->castAsCXXRecordDecl();

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit)
          continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit - BaseOffset : 0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit - BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit)
        break;

      unsigned FieldStart =
          FieldOffset < StartBit ? StartBit - FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart,
                                 EndBit - FieldOffset, Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, we're good.
    return true;
  }

  return false;
}

/// getFPTypeAtOffset - Return a floating point type at the specified offset.
static llvm::Type *getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                                     const llvm::DataLayout &TD) {
  if (IROffset == 0 && IRType->isFloatingPointTy())
    return IRType;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    if (!STy->getNumContainedTypes())
      return nullptr;

    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return getFPTypeAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset / EltSize * EltSize;
    return getFPTypeAtOffset(EltTy, IROffset, TD);
  }

  return nullptr;
}

llvm::Type *X86_64ABIInfo::GetSSETypeAtOffset(llvm::Type *IRType,
                                              unsigned IROffset,
                                              QualType SourceTy,
                                              unsigned SourceOffset) const {
  const llvm::DataLayout &TD = getDataLayout();
  unsigned SourceSize =
      (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
  llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
  if (!T0 || T0->isDoubleTy())
    return llvm::Type::getDoubleTy(getVMContext());

  // Get the adjacent FP type.
  llvm::Type *T1 = nullptr;
  unsigned T0Size = TD.getTypeAllocSize(T0);
  if (SourceSize > T0Size)
    T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
  if (T1 == nullptr) {
    // Check if IRType is a half/bfloat + float. The float type will be at
    // IROffset+4 due to its alignment.
    if (T0->is16bitFPTy() && SourceSize > 4)
      T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    // If we can't get a second FP type, return the single half/float.
    if (T1 == nullptr)
      return T0;
  }

  if (T0->isFloatTy() && T1->isFloatTy())
    return llvm::FixedVectorType::get(T0, 2);

  if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
    llvm::Type *T2 = nullptr;
    if (SourceSize > 4)
      T2 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    if (T2 == nullptr)
      return llvm::FixedVectorType::get(T0, 2);
    return llvm::FixedVectorType::get(T0, 4);
  }

  if (T0->is16bitFPTy() || T1->is16bitFPTy())
    return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()),
                                      4);

  return llvm::Type::getDoubleTy(getVMContext());
}

llvm::Type *X86_64ABIInfo::GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                                  unsigned IROffset,
                                                  QualType SourceTy,
                                                  unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // A 1/2/4-byte integer can be used only if the rest of the goodness in
    // the source type is just tail padding.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType)
                              ? 32
                              : cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset * 8 + BitWidth,
                                SourceOffset * 8 + 64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset / EltSize * EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset - EltOffset, SourceTy,
                                  SourceOffset);
  }

  if (IRType->isIntegerTy(128)) {
    assert(IROffset == 0);
    return IRType;
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in
  // an integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes - SourceOffset, 8U) * 8);
}
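
// Example: for struct { float f; int i; } the single eightbyte merges to
// INTEGER (SSE merged with Integer yields Integer), and since the IR struct
// {float, i32} has no integer at offset 0, this routine falls through to the
// final case and picks an i64 covering both fields.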

/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two-element struct, return a first class struct
/// whose second element is at offset 8.
static llvm::Type *GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                                              const llvm::DataLayout &TD) {
  // To satisfy the ABI, the high part must start at offset 8. If the high
  // and low parts we inferred are both 4-byte types (e.g. i32 and i32), the
  // resultant struct type ({i32,i32}) would place the second element at
  // offset 4. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  llvm::Align HiAlign = TD.getABITypeAlign(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // If needed, widen the low part so that the second element starts at an
  // 8-byte offset. We cannot widen the high part, since that might read off
  // the end of the struct.
  if (HiStart != 8) {
    // The low part is half, float, i8/i16/i32, or a 32-bit pointer; promote
    // it to a larger type.
    if (Lo->isHalfTy() || Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy()) &&
             "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg=*/true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // Rule 2: MEMORY is returned via a hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

  // Rule 3: INTEGER uses the next register of %rax, %rdx.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const auto *ED = RetTy->getAsEnumDecl())
        RetTy = ED->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(RetTy))
        return ABIArgInfo::getExtend(RetTy);
    }
    if (ResType->isIntegerTy(128)) {
      // ... (i128 is kept as a single unit)
    }
    break;

  // Rule 4: SSE uses the next register of %xmm0, %xmm1.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

  // Rule 6: X87 is returned on the x87 stack in %st0 as an 80-bit number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

  // Rule 8: COMPLEX_X87 returns its real part in %st0 and its imaginary
  // part in %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously and X87 should never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // Rule 5: SSEUP widens the preceding SSE eightbyte.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

  // Rule 7: X87UP normally rides along with X87; with unions it can appear
  // alone, in which case gcc passes the extra bits in an SSE register.
  case X87Up:
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming
  // a first class struct aggregate with the high and low part: {low, high}.
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned freeIntRegs,
                                               unsigned &neededInt,
                                               unsigned &neededSSE,
                                               bool isNamedArg,
                                               bool IsRegCall) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  // Rule 1: MEMORY is passed on the stack. Rule 5: X87, X87UP and
  // COMPLEX_X87 are passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // Rule 2: INTEGER uses the next register of %rdi, %rsi, %rdx, %rcx, %r8
  // and %r9.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const auto *ED = Ty->getAsEnumDecl())
        Ty = ED->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(Ty))
        return ABIArgInfo::getExtend(Ty);
    }

    if (ResType->isIntegerTy(128)) {
      // ... (i128 consumes both halves as a single unit)
    }

    break;

  // Rule 3: SSE uses the next available SSE register, %xmm0 through %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should never occur as
  // hi classes, and X87Up must be preceded by X87, which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass:
    break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // X87Up generally doesn't occur here (long double is passed in memory),
  // except in situations involving unions.
  case X87Up:
  case SSE:
    ++neededSSE;
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // Rule 4: SSEUP is passed in the upper half of the last used SSE register;
  // this only happens when 128-bit (or wider) vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part as a
  // first class struct aggregate {low, high}.
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
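
// On return, neededInt/neededSSE tell the caller how many GPRs and SSE
// registers this argument would consume; computeInfo only commits them if
// enough registers remain, otherwise the argument is demoted to memory.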

ABIArgInfo X86_64ABIInfo::classifyRegCallStructTypeImpl(
    QualType Ty, unsigned &NeededInt, unsigned &NeededSSE,
    unsigned &MaxVectorWidth) const {
  auto RT = Ty->getAs<RecordType>();
  assert(RT && "classifyRegCallStructType only valid with struct types");

  const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();

  if (RD->hasFlexibleArrayMember())
    return getIndirectReturnResult(Ty);

  // Sum up bases.
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
                                        MaxVectorWidth)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
  }

  // Sum up members.
  for (const auto *FD : RD->fields()) {
    QualType MTy = FD->getType();
    if (MTy->isRecordType() && !MTy->isUnionType()) {
      if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
                                        MaxVectorWidth)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
    } else {
      unsigned LocalNeededInt, LocalNeededSSE;
      if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
                               /*isNamedArg=*/true, /*IsRegCall=*/true)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
      if (const auto *AT = getContext().getAsConstantArrayType(MTy))
        MTy = AT->getElementType();
      if (const auto *VT = MTy->getAs<VectorType>())
        if (getContext().getTypeSize(VT) > MaxVectorWidth)
          MaxVectorWidth = getContext().getTypeSize(VT);
      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;
    }
  }

  return ABIArgInfo::getDirect();
}

ABIArgInfo
X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                         unsigned &NeededSSE,
                                         unsigned &MaxVectorWidth) const {
  NeededInt = 0;
  NeededSSE = 0;
  MaxVectorWidth = 0;

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
                                       MaxVectorWidth);
}

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CallingConv = FI.getCallingConvention();
  // It is possible to force the Win64 calling convention on any x86_64
  // target with __attribute__((ms_abi)); delegate to WinX86_64ABIInfo then.
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);
    return;
  }

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  // Keep track of the number of assigned registers.
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
        !FI.getReturnType()->getTypePtr()->isUnionType()) {
      FI.getReturnInfo() = classifyRegCallStructType(
          FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
      if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
        FreeIntRegs -= NeededInt;
        FreeSSERegs -= NeededSSE;
      } else {
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
      }
    } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
               getContext().getCanonicalType(FI.getReturnType()
                                                 ->getAs<ComplexType>()
                                                 ->getElementType()) ==
                   getContext().LongDoubleTy)
      // Complex long double is passed in memory under regcall.
      FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
    else
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  }

  // If the return value is indirect, the hidden argument consumes one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --FreeIntRegs;
  else if (NeededSSE && MaxVectorWidth > 0)
    FI.setMaxVectorWidth(MaxVectorWidth);

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++FreeIntRegs;

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  // AMD64-ABI 3.2.3p3: Once arguments are classified, registers get assigned
  // in left-to-right order.
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
                                           MaxVectorWidth);
    else
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
                                      NeededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the stack;
    // partial register assignments get reverted.
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
      if (MaxVectorWidth > FI.getMaxVectorWidth())
        FI.setMaxVectorWidth(MaxVectorWidth);
    } else {
      it->info = getIndirectResult(it->type, FreeIntRegs);
    }
  }
}

static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  Address overflow_arg_area_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // In practice we use alignment greater than 16 where necessary.
  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
  if (Align > CharUnits::fromQuantity(8))
    overflow_arg_area =
        emitRoundPointerUpToAlignment(CGF, overflow_arg_area, Align);

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res = overflow_arg_area;

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  //   l->overflow_arg_area + sizeof(type),
  // then Step 10: align l->overflow_arg_area upwards to an 8-byte boundary.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
                                            Offset, "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Address(Res, LTy, Align);
}

RValue X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg=*/false);

  // Empty records are ignored for parameter passing purposes.
  if (AI.isIgnore())
    return Slot.asRValue();

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not, go to step 7 (the memory path).
  if (!neededInt && !neededSSE)
    return CGF.EmitLoadOfAnyValue(
        CGF.MakeAddrLValue(EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty), Ty),
        Slot);

  // AMD64-ABI 3.5.7p5: Steps 2-3. Verify whether the argument fits in the
  // remaining register save area.
  llvm::Value *InRegs = nullptr;
  Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;

  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with an
  // offset of l->gp_offset and/or l->fp_offset.
  llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
      CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
  Address RegAddr = Address::invalid();
  if (neededInt && neededSSE) {
    // A value spread across one GPR and one SSE register: copy both halves
    // into a temporary in the right order.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    Address Tmp = CGF.CreateMemTemp(Ty);
    Tmp = Tmp.withElementType(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Value *GPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
    llvm::Value *FPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;

    // ... (copy each half from RegLoAddr/RegHiAddr into Tmp, then use Tmp
    //      as RegAddr)
  } else if (neededInt || neededSSE == 1) {
    // The value fits in a single register class; compute its address inside
    // the register save area, copying to a temporary only when the coerced
    // type's size or alignment disagrees with the source type.
    auto TInfo = getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = TInfo.Width.getQuantity();
    CharUnits TyAlign = TInfo.Align;
    llvm::Type *CoTy = nullptr;
    if (AI.isDirect())
      CoTy = AI.getCoerceToType();

    llvm::Value *GpOrFpOffset = neededInt ? gp_offset : fp_offset;
    uint64_t Alignment = neededInt ? 8 : 16;
    uint64_t RegSize = neededInt ? neededInt * 8 : 16;
    // ... (compute RegAddr from RegSaveArea + GpOrFpOffset, honoring TySize,
    //      TyAlign, RegSize, and CoTy; PtrOffset/Dst handle the temporary
    //      copy when one is needed)
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save area;
    // copy the two eightbytes into a temporary as a pair of doubles.
    // ...
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  //   l->gp_offset = l->gp_offset + num_gp * 8
  //   l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);
  Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
                                 "vaarg.addr");
  return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(ResAddr, Ty), Slot);
}
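
// The constants above come from the SysV va_list layout: the register save
// area holds 6 GPRs (6 * 8 = 48 bytes) followed by 8 XMM registers
// (8 * 16 = 128 bytes), so gp_offset <= 48 - 8*neededInt and
// fp_offset <= 176 - 16*neededSSE test whether the argument still fits.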

RValue X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty, AggValueSlot Slot) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*AllowHigherAlign=*/false, Slot);
}

ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
    QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
  const Type *Base = nullptr;
  uint64_t NumElts = 0;

  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();
  }
  return current;
}

ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  // Treat an enum type as its underlying type.
  if (const auto *ED = Ty->getAsEnumDecl())
    Ty = ED->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return getNaturalAlignIndirect(
            Ty, getDataLayout().getAllocaAddrSpace(),
            RAA == CGCXXABI::RAA_DirectInMemory);
    }

    if (RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                     /*ByVal=*/false);
  }

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets.
  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (IsRegCall) {
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
        if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
          return ABIArgInfo::getDirect();
        return ABIArgInfo::getExpand();
      }
      return ABIArgInfo::getIndirect(
          Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
          (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
        FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (IsReturnType) {
        return ABIArgInfo::getExpand();
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
        // HVAs are delayed and reclassified in the second pass.
        return ABIArgInfo::getIndirect(
            Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
      }
    }
  }

  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or
    // is not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                     /*ByVal=*/false);

    // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    case BuiltinType::Bool:
      // Bool is extended to the ABI; other builtin types are not.
      return ABIArgInfo::getExtend(Ty);

    case BuiltinType::LongDouble:
      // Mingw64 GCC uses the old 80-bit extended precision floating point
      // unit and passes long double indirectly through memory.
      if (IsMingw64) {
        const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
        if (LDF == &llvm::APFloat::x87DoubleExtended())
          return ABIArgInfo::getIndirect(
              Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
      }
      break;

    case BuiltinType::Int128:
    case BuiltinType::UInt128:
    case BuiltinType::Float128:
      // 128-bit values don't fit in a GPR; pass arguments indirectly, but
      // return them as <2 x i64> so GCC-compatible code can read them back.
      if (!IsReturnType)
        return ABIArgInfo::getIndirect(
            Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
      return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2));

    default:
      break;
    }
  }

  if (Ty->isBitIntType()) {
    // Same MS x64 rule applies to _BitInt values.
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return ABIArgInfo::getIndirect(
          Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
    // ...
  }

  return ABIArgInfo::getDirect();
}

void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CC = FI.getCallingConvention();
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  // If __attribute__((sysv_abi)) is in use, use the SysV argument
  // classification rules.
  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);
    return;
  }

  unsigned FreeSSERegs = 0;
  if (IsVectorCall) {
    // We can use up to 4 SSE return registers with vectorcall.
    FreeSSERegs = 4;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers.
    FreeSSERegs = 16;
  }

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs,
                                  /*IsReturnType=*/true, IsVectorCall,
                                  IsRegCall);

  if (IsVectorCall) {
    // We can use up to 6 SSE register parameters with vectorcall.
    FreeSSERegs = 6;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers; reuse the return registers.
    FreeSSERegs = 16;
  }

  unsigned ArgNum = 0;
  unsigned ZeroSSERegs = 0;
  for (auto &I : FI.arguments()) {
    // Vectorcall on x64 only permits the first 6 arguments to be passed as
    // XMM/YMM registers. After that, pretend no vector registers are left.
    unsigned *MaybeFreeSSERegs =
        (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
    I.info = classify(I.type, *MaybeFreeSSERegs, /*IsReturnType=*/false,
                      IsVectorCall, IsRegCall);
    ++ArgNum;
  }

  if (IsVectorCall) {
    // For vectorcall, assign aggregate HVAs to any remaining vector registers
    // in a second pass.
    for (auto &I : FI.arguments())
      I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
  }
}

RValue WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*AllowHigherAlign=*/false, Slot);
}

std::unique_ptr<TargetCodeGenInfo> CodeGen::createX86_32TargetCodeGenInfo(
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters, bool SoftFloatABI) {
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
      CGM.getTriple(), CGM.getCodeGenOpts());
  return std::make_unique<X86_32TargetCodeGenInfo>(
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters, SoftFloatABI);
}

std::unique_ptr<TargetCodeGenInfo> CodeGen::createWinX86_32TargetCodeGenInfo(
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters) {
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
      CGM.getTriple(), CGM.getCodeGenOpts());
  return std::make_unique<WinX86_32TargetCodeGenInfo>(
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createX86_64TargetCodeGenInfo(CodeGenModule &CGM,
                                       X86AVXABILevel AVXLevel) {
  return std::make_unique<X86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM,
                                          X86AVXABILevel AVXLevel) {
  return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
}
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
static bool checkAVXParamFeature(DiagnosticsEngine &Diag, SourceLocation CallLoc, const FunctionDecl &Callee, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, StringRef Feature, bool IsArgument)
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static void initFeatureMaps(const ASTContext &Ctx, llvm::StringMap< bool > &CallerMap, const FunctionDecl *Caller, llvm::StringMap< bool > &CalleeMap, const FunctionDecl *Callee)
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, SourceLocation CallLoc, const FunctionDecl &Callee, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, bool IsArgument)
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
static llvm::Type * getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
getFPTypeAtOffset - Return a floating point type at the specified offset.
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM)
static bool isArgInAlloca(const ABIArgInfo &Info)
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getRequiredAlignment() const
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
Represents a C++ struct/union/class.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits One()
One - Construct a CharUnits quantity of one.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
bool getIndirectByVal() const
static ABIArgInfo getInAlloca(unsigned FieldIndex, bool Indirect=false)
static ABIArgInfo getIgnore()
static ABIArgInfo getExpand()
unsigned getDirectOffset() const
void setIndirectAlign(CharUnits IA)
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ TargetSpecific
TargetSpecific - Some argument types are passed as target specific types such as RISC-V's tuple type,...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicates default alignment) and address space.
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another specified type stored in CoerceToType.
static ABIArgInfo getIndirect(CharUnits Alignment, unsigned AddrSpace, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
bool canHaveCoerceToType() const
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
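To contrast the factory functions above, a deliberately invented classification policy; it is not the x86 algorithm, only a demonstration of how getDirectInReg and getIndirect are called:
static ABIArgInfo classifySketch(bool InRegs, llvm::Type *CoerceTy,
                                 CharUnits Align) {
  if (InRegs)
    return ABIArgInfo::getDirectInReg(CoerceTy); // registers, coerced type
  // Otherwise pass via hidden pointer in address space 0, byval on the stack.
  return ABIArgInfo::getIndirect(Align, /*AddrSpace=*/0, /*ByVal=*/true);
}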
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this class is possibly a signed pointer.
llvm::Value * getBasePointer() const
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
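A fragment showing how these builder helpers compose when walking a va_list: load the overflow-area pointer, advance it one slot, and store it back. It assumes a CodeGenFunction &CGF and an Address VAListAddr in scope; the field index 2, the 4-byte step, and the element type are illustrative:
Address OverflowAreaPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_area_ptr");
Address OverflowArea =
    Address(CGF.Builder.CreateLoad(OverflowAreaPtr, "overflow_area"),
            CGF.Int8Ty, CharUnits::fromQuantity(4));
Address Next = CGF.Builder.CreateConstInBoundsByteGEP(
    OverflowArea, CharUnits::fromQuantity(4)); // classic va_arg post-increment
CGF.Builder.CreateStore(Next.getBasePointer(), OverflowAreaPtr);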
RecordArgABI
Specify how one should pass an argument of a record type.
@ RAA_Indirect
Pass it as a pointer to temporary memory.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
ABIArgInfo & getReturnInfo()
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into an LLVM calling convention.
const_arg_iterator arg_begin() const
unsigned getRegParm() const
CanQualType getReturnType() const
bool getHasRegParm() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
unsigned getMaxVectorWidth() const
Return the maximum vector width in the arguments.
unsigned getNumRequiredArgs() const
void setMaxVectorWidth(unsigned Width)
Set the maximum vector width in the arguments.
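The canonical shape of a computeInfo() implementation over these accessors; the getDirect() policy below is a placeholder, not the x86 rule:
static void computeInfoSketch(CGFunctionInfo &FI) {
  FI.getReturnInfo() = ABIArgInfo::getDirect(); // placeholder policy
  for (auto &Arg : FI.arguments())
    Arg.info = ABIArgInfo::getDirect();         // placeholder policy
}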
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
ASTContext & getContext() const
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cast it to the default address space.
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CGFunctionInfo * CurFnInfo
llvm::LLVMContext & getLLVMContext()
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
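A fragment showing the two-way control flow EmitVAArg builds from these helpers; CGF and an llvm::Value *InRegs condition are assumed in scope, and the address computations are elided:
llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
llvm::BasicBlock *ContBlock  = CGF.createBasicBlock("vaarg.end");
CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
CGF.EmitBlock(InRegBlock);
// ... compute the register-save-area address here ...
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(InMemBlock);
// ... compute the overflow-area address here ...
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock);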
This class organizes the cross-function state that is used while generating LLVM code.
DiagnosticsEngine & getDiags() const
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::Triple & getTriple() const
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
bool isRequiredArg(unsigned argIdx) const
Return true if the argument at a given index is required.
Target specific hooks for defining how a type should be passed or returned from functions with one of the Swift calling conventions.
TargetCodeGenInfo - This class organizes various target-specific code generation issues, like target-specific attributes, builtins, and so on.
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the given global.
static std::string qualifyWindowsLibrary(StringRef Lib)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use the variadic convention or the non-variadic convention.
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
Decl - This represents one declaration (or definition), e.g. a variable, typedef, function, struct, etc.
Concrete class used by the front-end to report problems and issues.
Represents a function declaration or definition.
const ParmVarDecl * getParamDecl(unsigned i) const
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
CallingConv getCallConv() const
A (possibly-)qualified type.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
QualType getCanonicalType() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
specific_decl_iterator< FieldDecl > field_iterator
field_iterator field_begin() const
Encodes a location in the source.
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBlockPointerType() const
bool isFloat16Type() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isPointerType() const
bool isReferenceType() const
bool isEnumeralType() const
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
bool isBitIntType() const
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
bool isBuiltinType() const
Helper methods to distinguish type categories.
bool isAnyComplexType() const
bool isMemberPointerType() const
EnumDecl * getAsEnumDecl() const
Retrieves the EnumDecl this type refers to.
bool isBFloat16Type() const
bool isMemberFunctionPointerType() const
bool isVectorType() const
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type.
const T * getAs() const
Member-template getAs<specific type>.
bool isRecordType() const
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
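An illustrative combination of the predicates above: a rough "scalar slot" test in the style of this file's checks rather than an exact ABI rule; the helper name is invented:
static bool looksLikeScalarSlot(QualType Ty) {
  const Type *T = Ty.getTypePtr();
  // Aggregates, complex numbers, and vectors never fit a single scalar slot.
  if (T->isAnyComplexType() || T->isVectorType() || T->isRecordType())
    return false;
  return T->hasPointerRepresentation() || T->isIntegralOrEnumerationType();
}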
Represents a GCC generic vector type.
bool shouldPassIndirectly(CodeGenModule &CGM, ArrayRef< llvm::Type * > types, bool asReturnValue)
Should an aggregate which expands to the given type sequence be passed/returned indirectly under swiftcall?
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of a declared local variable.
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
std::unique_ptr< TargetCodeGenInfo > createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
std::unique_ptr< TargetCodeGenInfo > createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters)
bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty)
RValue emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, AggValueSlot Slot, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted in an array of slots on the stack.
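A sketch of a typical 32-bit forwarding call: the 4-byte slot size matches the i386 stack slot, the remaining flags are illustrative defaults, and the wrapper name is invented:
static RValue emitVAArgViaVoidPtr(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty, AggValueSlot Slot) {
  TypeInfoChars TI = CGF.getContext().getTypeInfoInChars(Ty);
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false, TI,
                          /*SlotSizeAndAlign=*/CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/true, Slot);
}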
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
X86AVXABILevel
The AVX ABI level for X86 targets.
bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyField - Return true iff the field is "empty", that is, it is an unnamed bit-field or an (array of) empty record(s).
llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
bool isAggregateTypeForABI(QualType T)
const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e. it has exactly one non-empty field or exactly one field which is itself a single element struct.
void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters, bool SoftFloatABI)
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
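The field walk behind isEmptyRecord, sketched to make the AllowArrays parameter concrete; the helper name allFieldsEmpty is invented:
static bool allFieldsEmpty(ASTContext &Context, const RecordDecl *RD) {
  for (const FieldDecl *FD : RD->fields())
    if (!isEmptyField(Context, FD, /*AllowArrays=*/true))
      return false; // a real (named, non-empty) field makes the record non-empty
  return true;
}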
std::unique_ptr< TargetCodeGenInfo > createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool isSIMDVectorType(ASTContext &Context, QualType Ty)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
@ Result
The result type of a method or function.
const FunctionProtoType * T
@ Type
The name was classified as a type.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
U cast(CodeGen::Address addr)
@ Class
The "class" keyword introduces the elaborated-type-specifier.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * Int32Ty