#include "TargetInfo.h"
#include "llvm/ADT/SmallBitVector.h"
bool IsX86_MMXType(llvm::Type *IRType) {
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         IRType->getScalarSizeInBits() != 64;
}
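// Illustrative note (added commentary, not in the original source): under
// this predicate a 64-bit vector such as <2 x i32>, <4 x i16>, or <8 x i8>
// is treated as an MMX candidate, while <1 x i64> fails the
// getScalarSizeInBits() != 64 check and is passed like a plain 64-bit value.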
  if (Constraint == "k") {
    // ...
    return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
  }
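// Sketch of the effect (added note): for an inline-asm operand constrained
// with "k" (an AVX-512 mask register), an i8 operand is retyped as <8 x i1>
// and an i16 as <16 x i1>, so the backend sees a genuine mask type.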
  if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
    if (BT->getKind() == BuiltinType::LongDouble) {
      if (&Context.getTargetInfo().getLongDoubleFormat() ==
          &llvm::APFloat::x87DoubleExtended())
        return false;
    }
    return true;
  }
  // ...
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}
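// Added summary: for vectorcall and regcall, a homogeneous vector aggregate
// must be built from 128/256/512-bit vector elements (see the size check
// above) and may contain at most four of them; e.g. struct { __m128 a, b,
// c, d; } qualifies, while a fifth member disqualifies the aggregate.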
static ABIArgInfo getDirectX86Hva(llvm::Type *T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  // ...
  AI.setCanBeFlattened(false);
  return AI;
}
  CCState(CGFunctionInfo &FI)
      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
        Required(FI.getRequiredArgs()), IsDelegateCall(FI.isDelegateCall()) {}

  llvm::SmallBitVector IsPreassigned;
  unsigned CC = CallingConv::CC_C;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;
  RequiredArgs Required;
  bool IsDelegateCall = false;
class X86_32ABIInfo : public ABIInfo {
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  bool IsLinuxABI;
  unsigned DefaultNumRegisterParameters;
  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }
  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }
  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State,
                                  unsigned ArgIndex) const;

  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
                   CGT.getTarget().getTriple().isOSCygMing()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}
  explicit X86_32SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/false) {}

  bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
                            bool AsReturnValue) const override {
    return occupiesMoreThan(ComponentTys, 3);
  }
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {
    SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
  }

  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
                                     const CodeGenOptions &Opts);
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // ...
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // An odd number of dollars means this is a real operand reference.
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {
        // ...
      }
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        // ...
      }
      // ...
    }
  }
  AsmString = std::move(Buf);
}
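// Worked example (added commentary): if one new output is prepended to an
// asm string whose inputs start at operand index FirstIn, a reference like
// "$1" with 1 >= FirstIn is rewritten to "$2", while escaped "$$" sequences
// and references to operands below FirstIn are left untouched.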
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  // ... (RetWidth is the bit width of the return type)
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Wider values use the EAX:EDX register pair.
    // ...
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate the register content to the declared return width.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // ...
  ReturnSlot.setAddress(ReturnSlot.getAddress().withElementType(CoerceTy));
  ResultRegDests.push_back(ReturnSlot);
  // ...
}
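// Added note: this implements the i386 register-return convention for asm
// wrappers. A value of 32 bits or less is forced into EAX ("={eax}"), wider
// values use the EAX:EDX pair, and the register content is truncated via
// CoerceTy before being stored to ReturnSlot.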
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);
  // ...
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  // ... (64- and 128-bit vectors are not returned in registers)
  if (Size == 64 || Size == 128)
    return false;

  // ... (single-element arrays recurse on the element type)
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // ... (structs: every field must itself be returnable in a register)
  for (const auto *FD : RD->fields()) {
    // ...
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  // ...
}
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as their element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();
  // ...
  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // ...
    if (FD->isBitField())
      return false;
    Size += Context.getTypeSize(FD->getType());
  }
  // ...
}
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // ...
  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // ...
      if (!CXXRD->isCLike())
        return false;
    }
    // ...
    if (CXXRD->isDynamicClass())
      return false;
  }
  // ...
  return Size == getContext().getTypeSize(Ty);
}
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  if (State.CC != llvm::CallingConv::X86_FastCall &&
      State.CC != llvm::CallingConv::X86_VectorCall && State.FreeRegs) {
    // ... (the sret pointer consumes one free register)
    return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(
      RetTy, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
}
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  // ...
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // Homogeneous vector aggregates are returned directly.
    // ...
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      // ... (128-bit vectors are returned as <2 x i64>)
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose register,
      // or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        // ...

      return getIndirectReturnResult(RetTy, State);
    }
    // ...
    return getIndirectReturnResult(RetTy, State);
  }

  // ... (aggregates with non-trivial C++ semantics go indirect)
    return getIndirectReturnResult(RetTy, State);

  if (const ComplexType *CT = RetTy->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());
    // ... (_Complex _Float16 is returned as <2 x half>)
        llvm::Type::getHalfTy(getVMContext()), 2));
    // ...
  }

  // Small structures which are register sized are generally returned
  // in a register.
  if (shouldReturnTypeInRegister(RetTy, getContext())) {
    // ... (single-element structs may be returned as their element)
    if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
        || SeltTy->hasPointerRepresentation())
      // ...
  }
  // ...
    return getIndirectReturnResult(RetTy, State);

  // Treat an enum type as its underlying type.
  // ...
    RetTy = ED->getIntegerType();

  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() > 64)
      return getIndirectReturnResult(RetTy, State);
  // ...
}
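// Added summary: on i386 the return convention is mostly indirect (sret).
// The special cases above cover vectorcall/regcall homogeneous aggregates,
// Darwin's vector returns, complex types, and "register sized" small
// structs; e.g. with the small-struct-in-register ABI a
// struct { short a, b; } (32 bits) comes back in EAX.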
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // ...
  if (Align <= MinABIStackAlignInBytes)
    // ...
  // ...
  if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
    // ...
  if (!IsDarwinVectorABI) {
    // ...
    return MinABIStackAlignInBytes;
  }
  // ...
  return MinABIStackAlignInBytes;
}
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      // ...
      return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  // ... (with the default stack alignment, pass byval at 4 bytes)
                                 getDataLayout().getAllocaAddrSpace(),
  // ...

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  // ... (pass byval at StackAlign)
                                 getDataLayout().getAllocaAddrSpace(),
                                 /*ByVal=*/true, Realign);
}
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  // ...
  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}
bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    // ... (floating-point classes never use integer registers)
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;
  // ...
  if (SizeInRegs > State.FreeRegs) {
    // ...
  }
  // ... (the MCU psABI also refuses >8-byte structs in registers)
  if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
    return false;
  // ...
  State.FreeRegs -= SizeInRegs;
  return true;
}
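// Worked example (added commentary): with __attribute__((regparm(3))) there
// are three 32-bit registers available, so an int consumes one
// (SizeInRegs = 1), a long long consumes two, and a 12-byte struct needs
// three; once SizeInRegs exceeds State.FreeRegs the argument falls back to
// the stack.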
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // ...
  NeedsPadding = false;
  // ...
  if (!updateFreeRegs(Ty, State))
    return false;
  // ...
  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;
    return false;
  }

  return true;
}
bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
                    // ... (integral, enum, pointer, or reference type)

  if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
                      State.CC == llvm::CallingConv::X86_VectorCall))
    return false;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
    return false;

  return true;
}
void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI,
                                           CCState &State) const {
  // ...
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
  for (int I = 0, E = Args.size(); I < E; ++I) {
    const QualType &Ty = Args[I].type;
    // ...
    if (/* ... */
        isHomogeneousAggregate(Ty, Base, NumElts)) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        // ...
        State.IsPreassigned.set(I);
      }
    }
  }
}
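// Added note: this is the first of vectorcall's two passes on i386. Vector
// arguments that fit are assigned SSE registers up front and marked in
// IsPreassigned; the main computeInfo loop then skips them, so HVAs compete
// only for whatever SSE registers remain.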
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
                                               unsigned ArgIndex) const {
  // ...
  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
  // ...
  TypeInfo TI = getContext().getTypeInfo(Ty);

  // ... (check with the C++ ABI first)
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  } else if (State.IsDelegateCall) {
    // Avoid having an 'sret' parameter following the 'this' parameter.
    ABIArgInfo Res = getIndirectResult(Ty, /*ByVal=*/false, State);
    // ...
  }

  // Homogeneous vector aggregates (regcall/vectorcall).
  if ((IsRegCall || IsVectorCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      // ...
        return getDirectX86Hva();
      // ...
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  // ... (structs with flexible array members are always indirect)
  if (RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
    return getIndirectResult(Ty, /*ByVal=*/true, State);

  // Ignore empty structs/unions on non-Windows ABIs.
  if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  llvm::LLVMContext &LLVMContext = getVMContext();
  llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
  bool NeedsPadding = false;
  bool InReg;
  if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
    unsigned SizeInRegs = (TI.Width + 31) / 32;
    SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
    llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
    // ...
  }
  llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

  // ...
  if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
    unsigned AlignInBits = 0;
    // ...
      const ASTRecordLayout &Layout =
          getContext().getASTRecordLayout(RT->getOriginalDecl());
    // ...
      AlignInBits = TI.Align;
    // ...
    if (AlignInBits > 32)
      return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  // ... (small enough aggregates may be expanded into separate slots)
  if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
      canExpandIndirectArgument(Ty))
    return ABIArgInfo::getExpandWithPadding(
        IsFastCall || IsVectorCall || IsRegCall, PaddingType);

  return getIndirectResult(Ty, /*ByVal=*/true, State);
  // (end of the aggregate case)

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Windows, vectors are passed directly if registers are available,
    // or indirectly if not.
    if (IsWin32StructABI) {
      if (TI.Width <= 512 && State.FreeSSERegs > 0) {
        // ...
      }
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // On Darwin, some vectors are passed in memory.
    if (IsDarwinVectorABI) {
      if (/* ... */
          (TI.Width == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), TI.Width));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      // ...
  }

  // Treat an enum type as its underlying type.
  // ...
    Ty = ED->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (isPromotableIntegerTypeForABI(Ty)) {
    // ...
  }

  if (const auto *EIT = Ty->getAs<BitIntType>()) {
    if (EIT->getNumBits() <= 64) {
      // ...
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }
  // ...
}
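// Illustrative cases (added commentary): under the default cdecl state no
// registers are free, so struct { int a, b; } is either expanded into two
// i32 stack slots (canExpandIndirectArgument) or passed byval; with regparm
// or fastcall the same struct can instead be coerced to {i32, i32} in
// registers when shouldAggregateUseDirect and updateFreeRegs agree.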
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI);
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall) {
    // ...
    State.FreeSSERegs = 3;
  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    // ...
    State.FreeSSERegs = 6;
  }
  // ...
  else if (State.CC == llvm::CallingConv::X86_RegCall) {
    // ...
    State.FreeSSERegs = 8;
  } else if (IsWin32StructABI) {
    // ...
    State.FreeRegs = DefaultNumRegisterParameters;
    State.FreeSSERegs = 3;
  } else
    State.FreeRegs = DefaultNumRegisterParameters;

  // ...
  if (State.FreeRegs) {
    // ...
  }

  // For vectorcall, do a first pass over the arguments, assigning FP and
  // vector arguments to their free registers.
  if (State.CC == llvm::CallingConv::X86_VectorCall)
    runVectorCallFirstPass(FI, State);

  bool UsedInAlloca = false;
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    // Skip arguments that have already been assigned.
    if (State.IsPreassigned.test(I))
      continue;
    // ...
  }

  // If we needed to use inalloca for any argument, rewrite all the memory
  // arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
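// Added summary of the register budgets set above: fastcall uses ECX/EDX
// plus 3 SSE registers, vectorcall gets 6 SSE registers, regcall 8, and the
// MSVC ABI passes the first 3 SSE vectors in registers; plain cdecl relies
// on DefaultNumRegisterParameters (the -mregparm value, usually 0).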
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   CharUnits &StackOffset, ABIArgInfo &Info,
                                   QualType Type) const {
  // ...
  assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");

  // sret pointers and indirect things will require an extra pointer
  // indirection, unless they are byval.
  bool IsIndirect = false;
  // ...
  llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
  if (IsIndirect)
    LLTy = llvm::PointerType::getUnqual(getVMContext());
  FrameFields.push_back(LLTy);
  StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);

  // Insert padding bytes to respect alignment.
  CharUnits FieldEnd = StackOffset;
  StackOffset = FieldEnd.alignTo(WordSize);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  }
}

static bool isArgInAlloca(const ABIArgInfo &Info) {
  // ...
  llvm_unreachable("invalid enum");
}
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;
  // ...
  CharUnits StackOffset;
  // ...

  // Put 'this' into the struct before 'sret', if necessary.
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }
  // ...

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true),
                  StackAlign);
}
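// Added sketch of the resulting layout: for MSVC on x86, inalloca packs the
// in-memory arguments into one packed struct allocated in the caller's
// outgoing-argument area, in push order: an sret-after-this slot first if
// needed, then the sret pointer (unless it lives in a register), then each
// remaining stack argument, each padded to a 4-byte boundary by
// addFieldToArgStruct above.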
RValue X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  CCState State(*const_cast<CGFunctionInfo *>(CGF.CurFnInfo));
  // ...

  // x86-32 changes the alignment of certain arguments on the stack.
  TypeInfo.Align = CharUnits::fromQuantity(
      getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
  // ...
}
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  // ... (an explicit -fpcc-struct-return / -freg-struct-return wins)
  }

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
                                 CodeGen::CodeGenModule &CGM) {
  if (!FD->hasAttr<AnyX86InterruptAttr>())
    return;

  llvm::Function *Fn = cast<llvm::Function>(GV);
  Fn->setCallingConv(llvm::CallingConv::X86_INTR);
  // ... (mark the interrupt frame parameter as byval)
  llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
      Fn->getContext(), ByValTy);
  Fn->addParamAttr(0, NewAttr);
}
void X86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }
    // ...
  }
}
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
  // ...
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
  // ...
  Builder.CreateAlignedStore(
      Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
      CharUnits::One());
  // ...
  llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
  // ...
}
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}
class X86_64ABIInfo : public ABIInfo {
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg, bool IsRegCall = false) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                  unsigned &neededInt, unsigned &neededSSE,
                                  bool isNamedArg,
                                  bool IsRegCall = false) const;

  ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                       unsigned &NeededSSE,
                                       unsigned &MaxVectorWidth) const;

  ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                           unsigned &NeededSSE,
                                           unsigned &MaxVectorWidth) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a number of ambiguities; Darwin keeps
  /// the older behavior.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  /// GCC classifies <1 x long long> as SSE, but some platform ABIs choose
  /// to classify it as INTEGER.
  bool classifyIntegerMMXAsSSE() const {
    // Clang releases up to 3.8 classified these as integer.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver3_8)
      return false;

    const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
      return false;
    return true;
  }

  bool passInt128VectorsInMem() const {
    // Clang releases up to 9 passed these in SSE registers.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver9)
      return false;

    const llvm::Triple &T = getTarget().getTriple();
    return T.isOSLinux() || T.isOSNetBSD();
  }

  bool returnCXXRecordGreaterThan128InMem() const {
    // Clang releases up to 20 (and the PS ABIs) returned these in registers.
    if (getContext().getLangOpts().getClangABICompat() <=
            LangOptions::ClangABI::Ver20 ||
        getTarget().getTriple().isPS())
      return false;
    return true;
  }

  X86AVXABILevel AVXLevel;
  // Some ABIs use 32-bit pointers on 64-bit hardware.
  bool Has64BitPointers;
public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // ... (classify the argument; 'ty' is the coerced LLVM type)
    if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
      return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
    // ...
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
  RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                     AggValueSlot Slot) const override;

  bool has64BitPointers() const {
    return Has64BitPointers;
  }
class WinX86_64ABIInfo : public ABIInfo {
public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

private:
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
                      bool IsVectorCall, bool IsRegCall) const;
  ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
                                           const ABIArgInfo &current) const;
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
    // ...
  }

  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // ...
    bool HasAVXType = false;
    for (const CallArg &arg : args) {
      if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(arg.Ty)) {
        HasAVXType = true;
        break;
      }
    }
    // ...
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->addFnAttr("stackrealign");
      }
      // ...
    }
  }

  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee,
                            const CallArgList &Args,
                            QualType ReturnType) const override;
static void initFeatureMaps(const ASTContext &Ctx,
                            llvm::StringMap<bool> &CallerMap,
                            const FunctionDecl *Caller,
                            llvm::StringMap<bool> &CalleeMap,
                            const FunctionDecl *Callee) {
  if (CalleeMap.empty() && CallerMap.empty()) {
    // ...
  }
}

static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
                                 SourceLocation CallLoc,
                                 const FunctionDecl &Callee,
                                 const llvm::StringMap<bool> &CallerMap,
                                 const llvm::StringMap<bool> &CalleeMap,
                                 QualType Ty, StringRef Feature,
                                 bool IsArgument) {
  bool CallerHasFeat = CallerMap.lookup(Feature);
  bool CalleeHasFeat = CalleeMap.lookup(Feature);
  if (!CallerHasFeat && !CalleeHasFeat &&
      (!Callee.isExternallyVisible() || Callee.hasAttr<AlwaysInlineAttr>()))
    return false;

  if (!CallerHasFeat && !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
           << IsArgument << Ty << Feature;

  // Mixing calling conventions here is very clearly an error.
  if (!CallerHasFeat || !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::err_avx_calling_convention)
           << IsArgument << Ty << Feature;

  return false;
}

static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
                          SourceLocation CallLoc, const FunctionDecl &Callee,
                          const llvm::StringMap<bool> &CallerMap,
                          const llvm::StringMap<bool> &CalleeMap, QualType Ty,
                          bool IsArgument) {
  // ... (types wider than 256 bits require avx512f, wider than 128 avx)
    return checkAVXParamFeature(Diag, CallLoc, Callee, CallerMap, CalleeMap,
                                Ty, "avx512f", IsArgument);
  // ...
}
void X86_64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
                                                   SourceLocation CallLoc,
                                                   const FunctionDecl *Caller,
                                                   const FunctionDecl *Callee,
                                                   const CallArgList &Args,
                                                   QualType ReturnType) const {
  // ...
  llvm::StringMap<bool> CallerMap;
  llvm::StringMap<bool> CalleeMap;
  unsigned ArgIndex = 0;

  // Loop through the actual call arguments rather than the function's
  // parameters, in case the call is variadic.
  for (const CallArg &Arg : Args) {
    // ... (only vector arguments wider than 128 bits need checking)
    if (Arg.getType()->isVectorType() &&
        // ...
      QualType Ty = Arg.getType();
      // Use the declared parameter type when available, for clearer
      // diagnostics.
      if (ArgIndex < Callee->getNumParams())
        Ty = Callee->getParamDecl(ArgIndex)->getType();

      if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, *Callee,
                        CallerMap, CalleeMap, Ty, /*IsArgument=*/true))
        return;
    }
    // ...
  }

  // Check the return type as well.
  if (Callee->getReturnType()->isVectorType() &&
      // ...
    checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, *Callee,
                  CallerMap, CalleeMap, Callee->getReturnType(),
                  /*IsArgument=*/false);
  }
}
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument contains a space, enclose it in quotes.
  bool Quote = Lib.contains(' ');
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.ends_with_insensitive(".lib") && !Lib.ends_with_insensitive(".a"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             bool DarwinVectorABI, bool RetSmallStructInRegABI,
                             bool Win32StructABI,
                             unsigned NumRegisterParameters)
      : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
                                Win32StructABI, NumRegisterParameters,
                                /*SoftFloatABI=*/false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WinX86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
    // ...
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WinX86_64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  // ...
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }
    // ...
  }

  addStackProbeTargetAttributes(D, GV, CGM);
}
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // ...
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // ...
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
                             Class &Hi, bool isNamedArg, bool IsRegCall) const {
  // ...
  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();
    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
               k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
      Current = SSE;
    } else if (k == BuiltinType::Float128) {
      Lo = SSE;
      Hi = SSEUp;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad()) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
    // ...
    return;
  }

  // Treat an enum type as its underlying type.
  // ...
    classify(ED->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
  // ...

  if (Has64BitPointers) {
    // ...
  } else {
    // A 32-bit member function pointer is {i32, i32}; classify Hi too if it
    // straddles an eightbyte boundary.
    uint64_t EB_FuncPtr = (OffsetBase) / 64;
    uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
    if (EB_FuncPtr != EB_ThisAdj) {
      // ...
    }
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
      // gcc passes these as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be split.
      uint64_t EB_Lo = (OffsetBase) / 64;
      // ...
    } else if (Size == 64) {
      QualType ElementType = VT->getElementType();

      // gcc passes <1 x long long> as SSE but some platforms keep integer.
      if (!classifyIntegerMMXAsSSE() &&
          // ...

      // If this type crosses an eightbyte boundary, it should be split.
      if (OffsetBase && OffsetBase != 64)
        // ...
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      QualType ElementType = VT->getElementType();

      // gcc passes 256- and 512-bit <X x __int128> vectors in memory.
      if (passInt128VectorsInMem() && Size != 128 &&
          // ...
    }
    // ...
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());
    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||
               ET->isBFloat16Type()) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad())
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble())
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
    return;
  }

  if (const auto *EITy = Ty->getAs<BitIntType>()) {
    if (EITy->getNumBits() <= 64)
      Current = Integer;
    else if (EITy->getNumBits() <= 128)
      Lo = Hi = Integer;
    // Larger _BitInt values are passed in memory.
    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // Rule 1: objects larger than eight eightbytes have class MEMORY
    // (regcall excepted).
    if (!IsRegCall && Size > 512)
      return;

    // Rule 1: unaligned fields give class MEMORY.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement a simplified merge.
    // ...
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getZExtSize();

    // The only case a 256- or 512-bit wide vector could be used to pass an
    // array is a homogeneous vector of that size.
    if (Size > 128 &&
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
      return;

    for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
         ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) &&
           "Invalid SSEUp array classification.");
    return;
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // ...
    const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
    // ...
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base = I.getType()->castAsCXXRecordDecl();
        // ...
        Class FieldLo, FieldHi;
        // ...
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (returnCXXRecordGreaterThan128InMem() &&
            (Size > 128 && (Size != getContext().getTypeSize(I.getType()) ||
                            Size > getNativeVectorSizeForAVXABI(AVXLevel)))) {
          Lo = Memory;
        }
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }

    // Classify the fields one at a time, merging the results.
    bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
                                LangOptions::ClangABI::Ver11 ||
                            getContext().getTargetInfo().getTriple().isPS();
    bool IsUnion = RT->isUnionType() && !UseClang11Compat;

    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      // ...
      bool BitField = i->isBitField();

      // Ignore padding bit-fields.
      if (BitField && i->isUnnamedBitField())
        continue;

      // Rule 1: oversized or unaligned records have class MEMORY; the only
      // exception is a single oversized vector member.
      if (Size > 128 &&
          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      bool IsInMemory =
          Offset % getContext().getTypeAlign(i->getType().getCanonicalType());
      // Note, skip this test for bit-fields, see below.
      if (!BitField && IsInMemory) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // ...
      Class FieldLo, FieldHi;

      // Bit-fields require special handling: they do not force the structure
      // to be passed in memory even if unaligned, so they can straddle an
      // eightbyte.
      if (BitField) {
        assert(!i->isUnnamedBitField());
        // ... (compute the eightbytes spanned by the bit-field)
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}
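// Worked example (added commentary): struct S { double d; long l; } is 16
// bytes; its first eightbyte classifies as SSE via the double and its
// second as INTEGER via the long, so postMerge leaves {SSE, INTEGER} and S
// travels in one XMM register plus one GPR. Misaligning either field or
// growing the struct past 16 bytes degrades the classification to MEMORY.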
ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // ... (treat an enum type as its underlying type)
    Ty = ED->getIntegerType();
  // ...
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());
  // ...
  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());
}
bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&
        // ... (__int128 elements make the vector illegal)
      return true;
  }
  return false;
}
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // ... (treat an enum type as its underlying type)
    Ty = ED->getIntegerType();
  // ...
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/true);
  // ...
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
  // ...
  if (freeIntRegs == 0) {
    // ...
    if (Align == 8 && Size <= 64)
      // ...
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align),
                                 getDataLayout().getAllocaAddrSpace());
}
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  // ...
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType)) {
    // Don't pass vXi128 vectors in their native type; the backend can't
    // legalize them. Use a vXi64 vector instead.
    if (passInt128VectorsInMem() &&
        // ...
      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
                                        Size / 64);
    // ...
  }

  if (IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  // Return a vector type based on the size of 'Ty'.
  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                                    Size / 64);
}
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getZExtSize();

    // Check each element to see if it overlaps the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // ...
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // ...
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base = I.getType()->castAsCXXRecordDecl();
        // ...
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest.
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      // ...
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
      // ...
    }
    // ...
  }
  // ...
}
static llvm::Type *getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                                     const llvm::DataLayout &TD) {
  if (IROffset == 0 && IRType->isFloatingPointTy())
    return IRType;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    if (!STy->getNumContainedTypes())
      return nullptr;

    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return getFPTypeAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset / EltSize * EltSize;
    return getFPTypeAtOffset(EltTy, IROffset, TD);
  }

  return nullptr;
}
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  const llvm::DataLayout &TD = getDataLayout();
  unsigned SourceSize =
      (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
  llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
  if (!T0 || T0->isDoubleTy())
    return llvm::Type::getDoubleTy(getVMContext());

  // Get the adjacent FP type.
  llvm::Type *T1 = nullptr;
  unsigned T0Size = TD.getTypeAllocSize(T0);
  if (SourceSize > T0Size)
    T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
  if (T1 == nullptr) {
    // Check if IRType is a half/bfloat + float; the float will be at
    // IROffset+4 due to its alignment.
    if (T0->is16bitFPTy() && SourceSize > 4)
      T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    // ... (if there is still no T1, return the single element)
  }

  if (T0->isFloatTy() && T1->isFloatTy())
    return llvm::FixedVectorType::get(T0, 2);

  if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
    llvm::Type *T2 = nullptr;
    // ...
      return llvm::FixedVectorType::get(T0, 2);
    return llvm::FixedVectorType::get(T0, 4);
  }

  if (T0->is16bitFPTy() || T1->is16bitFPTy())
    return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);

  return llvm::Type::getDoubleTy(getVMContext());
}
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that
  // we're returning an 8-byte unit starting with it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // A 1/2/4-byte integer is usable only if the rest of the bits in the
    // source eightbyte are just tail padding.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) || /* ... */) {
      if (BitsContainNoUserData(SourceTy, /* ... */
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  if (IRType->isIntegerTy(128)) {
    assert(IROffset == 0);
    // ...
  }

  // Okay, we don't have any better idea of what to pass, so pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
static llvm::Type *GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                                              const llvm::DataLayout &TD) {
  // To satisfy the ABI, the high part must start at offset 8. If both
  // inferred parts are 4-byte types (e.g. i32 and i32), {i32, i32} would put
  // the second element at offset 4, so check for this.
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  llvm::Align HiAlign = TD.getABITypeAlign(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // ... (if HiStart != 8, widen the low part so Hi lands at offset 8)
  if (Lo->isHalfTy() || Lo->isFloatTy())
    Lo = llvm::Type::getDoubleTy(Lo->getContext());
  else {
    assert((Lo->isIntegerTy() || Lo->isPointerTy())
           && "Invalid/unknown lo type");
    Lo = llvm::Type::getInt64Ty(Lo->getContext());
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const {
  // Classify the return type with the AMD64 classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg=*/true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    // ...
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  case Memory:
    // MEMORY-class values are returned via a hidden sret argument.
    return getIndirectReturnResult(RetTy);

  case Integer:
    // INTEGER eightbytes come back in %rax/%rdx.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // ... (treat an enum type as its underlying type)
      RetTy = ED->getIntegerType();
    // ... (promotable integers are returned with the Extend attribute)
        isPromotableIntegerTypeForABI(RetTy))
      // ...

    if (ResType->isIntegerTy(128)) {
      // ...
    }
    break;

  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // ...
    llvm_unreachable("Invalid classification for hi word.");
  // ...
  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    break;

  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

  case X87Up:
    // ...
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    break;
  }

  // ... (merge HighPart with ResType via GetX86_64ByValArgumentPair)
}
ABIArgInfo
X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                    unsigned &neededInt, unsigned &neededSSE,
                                    bool isNamedArg, bool IsRegCall) const {
  // ...
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    // ...
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case Memory:
  case X87:
  case ComplexX87:
    // MEMORY and x87-class arguments are passed on the stack.
    // ...
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  case Integer:
    ++neededInt;
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // ... (treat an enum type as its underlying type)
      Ty = ED->getIntegerType();
    // ... (promotable integers use the Extend attribute)
        isPromotableIntegerTypeForABI(Ty))
      // ...

    if (ResType->isIntegerTy(128)) {
      // ...
    }
    break;

  case SSE: {
    ++neededSSE;
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // ...
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass:
    break;

  case Integer:
    ++neededInt;
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
    break;

  case SSE:
    ++neededSSE;
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
    break;

  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // ... (merge HighPart with ResType via GetX86_64ByValArgumentPair)
}
ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE,
                                             unsigned &MaxVectorWidth) const {
  // ...
  const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
  // ... (flexible array members force an indirect return)
    return getIndirectReturnResult(Ty);

  // Sum up bases.
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
                                        MaxVectorWidth)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
  }

  // Sum up members.
  for (const auto *FD : RD->fields()) {
    QualType MTy = FD->getType();
    // ... (nested records recurse)
      if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
                                        MaxVectorWidth)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
    // ... (other members are classified individually)
      unsigned LocalNeededInt, LocalNeededSSE;
      if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
                               /*isNamedArg=*/true, /*IsRegCall=*/true)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
      if (const auto *AT = getContext().getAsConstantArrayType(MTy))
        MTy = AT->getElementType();
      if (const auto *VT = MTy->getAs<VectorType>())
        if (getContext().getTypeSize(VT) > MaxVectorWidth)
          MaxVectorWidth = getContext().getTypeSize(VT);
      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;
    // ...
  }

  return ABIArgInfo::getDirect();
}
ABIArgInfo
X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                         unsigned &NeededSSE,
                                         unsigned &MaxVectorWidth) const {
  NeededInt = 0;
  NeededSSE = 0;
  MaxVectorWidth = 0;

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
                                       MaxVectorWidth);
}
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CallingConv = FI.getCallingConvention();
  // __attribute__((ms_abi)) can force the Win64 convention on any x86_64
  // target; delegate to WinX86_64ABIInfo in that case.
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);
    return;
  }

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  // Keep track of the number of assigned registers.
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;

  // ... (regcall struct returns are classified field by field)
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
    }
    // ... (a regcall _Complex long double return goes in memory)
          getContext().getCanonicalType(FI.getReturnType()
                                            ->getAs<ComplexType>()
                                            ->getElementType()) ==
          getContext().LongDoubleTy)
    // ...

  // If the return value is indirect, the hidden sret argument consumes one
  // integer register.
  // ...
  else if (NeededSSE && MaxVectorWidth > 0)
    FI.setMaxVectorWidth(MaxVectorWidth);

  // ...
  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
                                           MaxVectorWidth);
    else
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
                                      NeededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the stack
    // and prior assignments are reverted.
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
      // ...
    } else {
      it->info = getIndirectResult(it->type, FreeIntRegs);
    }
  }
}
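// Added summary: the SysV budget is 6 integer registers (RDI, RSI, RDX,
// RCX, R8, R9) and 8 SSE registers (XMM0-XMM7); regcall raises this to 11
// and 16. Per AMD64-ABI 3.2.3p3, if either count would go negative the
// argument's register assignment is reverted and the whole value goes on
// the stack.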
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  // ...
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
  // ... (round up to the argument's alignment if it exceeds 8 bytes)
  llvm::Value *Res = overflow_arg_area;

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to
  // l->overflow_arg_area + sizeof(type), rounded up to an 8-byte multiple.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
                                            Offset, "overflow_arg_area.next");
  // ...
  return Address(Res, LTy, Align);
}
RValue X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  // ...
  unsigned neededInt, neededSSE;
  // ...
  if (!neededInt && !neededSSE)
    // ... (pass through memory; see EmitX86_64VAArgFromMemory above)

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether the type may be passed in
  // registers. If not, go to the memory path.
  llvm::Value *InRegs = nullptr;
  // ...
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
  if (neededInt) {
    // gp_offset <= 48 - neededInt * 8 means the needed GPRs still fit.
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    // ...
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  // ...
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  // ...
  if (neededInt && neededSSE) {
    // ...
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    // ...
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Value *GPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
    llvm::Value *FPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
    // ... (copy both halves into a temporary and load from there)
  } else if (neededInt || neededSSE == 1) {
    // Copy to a temporary if necessary to ensure the appropriate alignment.
    auto TInfo = getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = TInfo.Width.getQuantity();
    CharUnits TyAlign = TInfo.Align;
    llvm::Type *CoTy = nullptr;
    // ...
    llvm::Value *GpOrFpOffset = neededInt ? gp_offset : fp_offset;
    uint64_t Alignment = neededInt ? 8 : 16;
    uint64_t RegSize = neededInt ? neededInt * 8 : 16;
    // ...
    llvm::Value *PtrOffset = /* ... */;
    Address Dst = Address(/* ... */);
    // ...
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // ... (load the two XMM halves and combine them)
  }

  // AMD64-ABI 3.5.7p5: Step 5. Advance gp_offset/fp_offset past the consumed
  // registers.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    // ...
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    // ...
  }

  // ... (emit the in-memory path, then merge the two results)
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
                                 "vaarg.addr");
  // ...
}
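// Added sketch of the va_list this code manipulates (SysV AMD64 ABI):
//   struct __va_list_tag {
//     unsigned gp_offset;        // 0..48, offset into reg_save_area (GPRs)
//     unsigned fp_offset;        // 48..176, offset for 16-byte XMM slots
//     void *overflow_arg_area;   // stack spill area
//     void *reg_save_area;
//   };
// The 48 and 176 literals above are exactly these limits: 6 GPRs * 8 bytes,
// and 48 + 8 XMM registers * 16 bytes.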
RValue X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty, AggValueSlot Slot) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
  // ...
}
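// Illustrative consequence (added note): on Win64, va_arg of a 16-byte
// struct, or of any 3/5/6/7-byte type, sees IsIndirect == true and reads a
// pointer from the va_list, while an 8-byte POD is read directly from its
// 8-byte slot.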
ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
    QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
  // ...
  if (/* ... */
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();
  }
  return current;
}
ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {
  // ... (treat an enum type as its underlying type)
    Ty = ED->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      // ...
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                     // ...
    }

    if (RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                     /*ByVal=*/false);
  }

  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets.
  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    // ... (regcall HVAs use registers when enough are free)
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
        // ...
      }
      return ABIArgInfo::getIndirect(
          Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
          /* ... */) {
        FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (IsReturnType) {
        // ...
      } else {
        // HVA arguments are delayed and reclassified in the second pass.
        return ABIArgInfo::getIndirect(
            Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
      }
    }
  }

  if (Ty->isMemberPointerType()) {
    // ...
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or
    // is not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                     /*ByVal=*/false);
    // ...
  }

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    case BuiltinType::Bool:
      // Bool is extended to the ABI; other builtin types are not.
      return ABIArgInfo::getExtend(Ty);

    case BuiltinType::LongDouble:
      // Mingw64 GCC uses the old 80-bit extended precision floating point
      // unit and passes long double indirectly.
      if (IsMingw64) {
        const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
        if (LDF == &llvm::APFloat::x87DoubleExtended())
          return ABIArgInfo::getIndirect(
              Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
      }
      break;

    case BuiltinType::Int128:
    case BuiltinType::UInt128:
    case BuiltinType::Float128:
      // These are passed indirectly, except when returned: then a vector
      // register is used.
      // ...
        return ABIArgInfo::getIndirect(
            Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
      return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2));

    // ...
    }
  }

  // ... (oversized or non-power-of-two _BitInt values go indirect)
      return ABIArgInfo::getIndirect(
          Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
  // ...
}
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CC = FI.getCallingConvention();
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  // If __attribute__((sysv_abi)) is in use, use the SysV argument
  // classification rules.
  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);
    return;
  }

  unsigned FreeSSERegs = 0;
  if (IsVectorCall) {
    // Up to 4 SSE return registers with vectorcall.
    FreeSSERegs = 4;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers.
    FreeSSERegs = 16;
  }

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs,
                                  /*IsReturnType=*/true,
                                  IsVectorCall, IsRegCall);

  if (IsVectorCall) {
    // Up to 6 SSE register parameters with vectorcall.
    FreeSSERegs = 6;
  } else if (IsRegCall) {
    FreeSSERegs = 16;
  }

  unsigned ArgNum = 0;
  unsigned ZeroSSERegs = 0;
  for (auto &I : FI.arguments()) {
    // Vectorcall on x64 only permits the first 6 arguments to be passed in
    // XMM/YMM registers; for later arguments pretend no vector registers
    // are left.
    unsigned *MaybeFreeSSERegs =
        (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
    I.info =
        classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
    ++ArgNum;
  }

  if (IsVectorCall) {
    // In the second pass, assign aggregate HVAs to any remaining registers.
    for (auto &I : FI.arguments())
      I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
  }
}
RValue WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
  // ...
}
std::unique_ptr<TargetCodeGenInfo> CodeGen::createX86_32TargetCodeGenInfo(
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters, bool SoftFloatABI) {
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
      CGM.getTriple(), CGM.getCodeGenOpts());
  return std::make_unique<X86_32TargetCodeGenInfo>(
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters, SoftFloatABI);
}

std::unique_ptr<TargetCodeGenInfo> CodeGen::createWinX86_32TargetCodeGenInfo(
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters) {
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
      CGM.getTriple(), CGM.getCodeGenOpts());
  return std::make_unique<WinX86_32TargetCodeGenInfo>(
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters);
}
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createX86_64TargetCodeGenInfo(CodeGenModule &CGM,
                                       X86AVXABILevel AVXLevel) {
  return std::make_unique<X86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM,
                                          X86AVXABILevel AVXLevel) {
  return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
}
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
static bool checkAVXParamFeature(DiagnosticsEngine &Diag, SourceLocation CallLoc, const FunctionDecl &Callee, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, StringRef Feature, bool IsArgument)
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static void initFeatureMaps(const ASTContext &Ctx, llvm::StringMap< bool > &CallerMap, const FunctionDecl *Caller, llvm::StringMap< bool > &CalleeMap, const FunctionDecl *Callee)
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, SourceLocation CallLoc, const FunctionDecl &Callee, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, bool IsArgument)
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
static llvm::Type * getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
getFPTypeAtOffset - Return a floating point type at the specified offset.
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM)
static bool isArgInAlloca(const ABIArgInfo &Info)
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getRequiredAlignment() const
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
Represents a C++ struct/union/class.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits One()
One - Construct a CharUnits quantity of one.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
bool getIndirectByVal() const
static ABIArgInfo getInAlloca(unsigned FieldIndex, bool Indirect=false)
static ABIArgInfo getIgnore()
static ABIArgInfo getExpand()
unsigned getDirectOffset() const
void setIndirectAlign(CharUnits IA)
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ TargetSpecific
TargetSpecific - Some argument types are passed as target specific types such as RISC-V's tuple type,...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
static ABIArgInfo getIndirect(CharUnits Alignment, unsigned AddrSpace, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
bool canHaveCoerceToType() const
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
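Together, these factories let a classifier express each passing convention as a value. A deliberately simplified sketch of that shape, assuming it lives inside the clang::CodeGen namespace; classifyExample is hypothetical, and the real X86 classifiers add register accounting, vector-width rules, and MSVC-specific cases:

ABIArgInfo classifyExample(QualType Ty, ASTContext &Context,
                           CodeGenTypes &CGT) {
  if (isEmptyRecord(Context, Ty, /*AllowArrays=*/true))
    return ABIArgInfo::getIgnore();          // nothing needs to be passed
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect(CGT.ConvertType(Ty));
  // Remaining aggregates go through a hidden pointer with byval semantics.
  return ABIArgInfo::getIndirect(Context.getTypeAlignInChars(Ty),
                                 /*AddrSpace=*/0, /*ByVal=*/true);
}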
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
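A minimal sketch of how these Address-aware builder helpers compose, assuming CGF (CodeGenFunction &), Ty (QualType), and Val (llvm::Value *) are in scope:

Address Tmp = CGF.CreateMemTemp(Ty, "tmp");                // aligned temporary
Address Field = CGF.Builder.CreateStructGEP(Tmp, 1, "f1"); // second field
CGF.Builder.CreateStore(Val, Field);                       // spill the value
llvm::Value *Reload = CGF.Builder.CreateLoad(Field, "reload");
(void)Reload;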
RecordArgABI
Specify how one should pass an argument of a record type.
@ RAA_Indirect
Pass it as a pointer to temporary memory.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
ABIArgInfo & getReturnInfo()
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
const_arg_iterator arg_begin() const
unsigned getRegParm() const
CanQualType getReturnType() const
bool getHasRegParm() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
unsigned getMaxVectorWidth() const
Return the maximum vector width in the arguments.
unsigned getNumRequiredArgs() const
void setMaxVectorWidth(unsigned Width)
Set the maximum vector width in the arguments.
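These accessors drive the computeInfo() traversal that every ABIInfo subclass implements. A minimal sketch reusing the hypothetical classifyExample() from the ABIArgInfo notes above; each entry of arguments() pairs a type with its info:

void computeInfoSketch(CGFunctionInfo &FI, ASTContext &Context,
                       CodeGenTypes &CGT) {
  FI.getReturnInfo() = classifyExample(FI.getReturnType(), Context, CGT);
  for (auto &Arg : FI.arguments())
    Arg.info = classifyExample(Arg.type, Context, CGT);
}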
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
ASTContext & getContext() const
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CGFunctionInfo * CurFnInfo
llvm::LLVMContext & getLLVMContext()
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
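These block-management entry points are the raw material of the register-versus-memory diamond used by va_arg lowering. An abbreviated sketch, assuming CGF (CodeGenFunction &) is in scope and InRegs is an llvm::Value * condition computed earlier; the join itself is what emitMergePHI() (listed below) performs:

llvm::BasicBlock *InRegBB = CGF.createBasicBlock("vaarg.in_reg");
llvm::BasicBlock *InMemBB = CGF.createBasicBlock("vaarg.in_mem");
llvm::BasicBlock *ContBB  = CGF.createBasicBlock("vaarg.end");
CGF.Builder.CreateCondBr(InRegs, InRegBB, InMemBB);

CGF.EmitBlock(InRegBB);
// ...load the argument from the register save area...
CGF.EmitBranch(ContBB);

CGF.EmitBlock(InMemBB);
// ...load the argument from the overflow area...
CGF.EmitBranch(ContBB);

CGF.EmitBlock(ContBB); // both paths join here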
This class organizes the cross-function state that is used while generating LLVM code.
DiagnosticsEngine & getDiags() const
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::Triple & getTriple() const
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
bool isRequiredArg(unsigned argIdx) const
Return true if the argument at a given index is required.
Target specific hooks for defining how a type should be passed or returned from functions with one of...
TargetCodeGenInfo - This class organizes various target-specific code-generation issues,...
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
static std::string qualifyWindowsLibrary(StringRef Lib)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
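A hypothetical subclass showing where the setTargetAttributes() hook fires; the X86 implementations use it to apply interrupt-handler attributes (see addX86InterruptAttrs above). The constructor form and the attribute string are assumptions for illustration:

class ExampleTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ExampleTargetCodeGenInfo(std::unique_ptr<ABIInfo> Info)
      : TargetCodeGenInfo(std::move(Info)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override {
    if (isa_and_nonnull<FunctionDecl>(D))
      if (auto *Fn = dyn_cast<llvm::Function>(GV))
        Fn->addFnAttr("example-target-attr"); // illustrative name only
  }
};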
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
Decl - This represents one declaration (or definition), e.g.
Concrete class used by the front-end to report problems and issues.
Represents a function declaration or definition.
const ParmVarDecl * getParamDecl(unsigned i) const
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
CallingConv getCallConv() const
A (possibly-)qualified type.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
QualType getCanonicalType() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
specific_decl_iterator< FieldDecl > field_iterator
field_iterator field_begin() const
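A hedged sketch of the record-scanning pattern these accessors enable; the field-count cutoff is invented, and a real predicate would classify each field's type rather than merely count:

static bool fitsInRegistersExample(const RecordDecl *RD) {
  if (RD->hasFlexibleArrayMember())
    return false;                 // flexible array members force memory
  unsigned NumFields = 0;
  for (const FieldDecl *FD : RD->fields()) {
    (void)FD;                     // a real check inspects FD->getType()
    ++NumFields;
  }
  return NumFields <= 4;          // illustrative limit only
}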
Encodes a location in the source.
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBlockPointerType() const
bool isFloat16Type() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isPointerType() const
bool isReferenceType() const
bool isEnumeralType() const
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
bool isBitIntType() const
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
bool isBuiltinType() const
Helper methods to distinguish type categories.
bool isAnyComplexType() const
bool isMemberPointerType() const
EnumDecl * getAsEnumDecl() const
Retrieves the EnumDecl this type refers to.
bool isBFloat16Type() const
bool isMemberFunctionPointerType() const
bool isVectorType() const
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
const T * getAs() const
Member-template getAs<specific type>'.
bool isRecordType() const
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
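These predicates provide the top-level dispatch of a classifier. A sketch, assuming Ty (QualType) and Context (ASTContext &) are in scope:

if (Ty->isAnyComplexType()) {
  // Complex values are commonly split into their two scalar halves.
} else if (Ty->isVectorType()) {
  uint64_t VecBits = Context.getTypeSize(Ty);  // bucket by 128/256/512 bits
  (void)VecBits;
} else if (Ty->isRecordType()) {
  const RecordDecl *RD = Ty->getAsRecordDecl();
  (void)RD;                                    // records get layout-based rules
}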
Represents a GCC generic vector type.
bool shouldPassIndirectly(CodeGenModule &CGM, ArrayRef< llvm::Type * > types, bool asReturnValue)
Should an aggregate which expands to the given type sequence be passed/returned indirectly under swif...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
std::unique_ptr< TargetCodeGenInfo > createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
std::unique_ptr< TargetCodeGenInfo > createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters)
bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty)
RValue emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, AggValueSlot Slot, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
X86AVXABILevel
The AVX ABI level for X86 targets.
bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyField - Return true iff the field is "empty", that is, it is an unnamed bit-field or an (arra...
llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
bool isAggregateTypeForABI(QualType T)
const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters, bool SoftFloatABI)
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
std::unique_ptr< TargetCodeGenInfo > createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool isSIMDVectorType(ASTContext &Context, QualType Ty)
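Several of these helpers compose directly: on a target whose va_list is a plain void * cursor, EmitVAArg can be little more than a call to emitVoidPtrVAArg(). A hedged sketch; the function name and the 4-byte slot size (chosen to mirror the i386 stack) are illustrative:

RValue EmitVAArgExample(CodeGenFunction &CGF, Address VAListAddr,
                        QualType Ty, AggValueSlot Slot) {
  Ty = useFirstFieldIfTransparentUnion(Ty);
  TypeInfoChars TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
  CharUnits SlotSize = CharUnits::fromQuantity(4); // i386-style stack slot
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
                          TyInfo, SlotSize, /*AllowHigherAlign=*/true, Slot);
}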
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
@ Result
The result type of a method or function.
const FunctionProtoType * T
@ Type
The name was classified as a type.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
U cast(CodeGen::Address addr)
@ Class
The "class" keyword introduces the elaborated-type-specifier.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * Int32Ty