10#include "TargetInfo.h"
12#include "llvm/ADT/SmallBitVector.h"
// Returns true for a vector type whose total width is 64 bits but whose
// element width is not 64 bits (e.g. <8 x i8>, <4 x i16>, <2 x i32>) —
// the IR shapes that correspond to the x86 MMX type.
// NOTE(review): this extract elides at least one conjunct between the two
// visible condition lines — confirm the full predicate against the
// original source before relying on this summary.
20bool IsX86_MMXType(llvm::Type *IRType) {
22 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
24 IRType->getScalarSizeInBits() != 64;
30 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
31 .Cases({
"y",
"&y",
"^Ym"},
true)
33 if (IsMMXCons && Ty->isVectorTy() &&
38 if (Constraint ==
"k") {
40 return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
51 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
52 if (BT->getKind() == BuiltinType::LongDouble) {
53 if (&Context.getTargetInfo().getLongDoubleFormat() ==
54 &llvm::APFloat::x87DoubleExtended())
62 unsigned VecSize = Context.getTypeSize(VT);
63 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
/// Whether a homogeneous vector aggregate ("HVA") with this many members is
/// small enough to be passed in registers under the vectorcall/regcall
/// conventions. The conventions allow at most four members.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  constexpr uint64_t MaxHvaMembers = 4;
  return NumMembers <= MaxHvaMembers;
}
76static ABIArgInfo getDirectX86Hva(llvm::Type* T =
nullptr) {
79 AI.setCanBeFlattened(
false);
// Per-call classification state threaded through the x86-32 argument
// classifier; seeded from the CGFunctionInfo being lowered.
89 CCState(CGFunctionInfo &FI)
90 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
91 Required(FI.getRequiredArgs()), IsDelegateCall(FI.isDelegateCall()) {}
// One bit per argument: set when the vectorcall first pass has already
// assigned the argument to SSE registers (set/tested during computeInfo).
93 llvm::SmallBitVector IsPreassigned;
// Calling convention of the function being lowered; defaults to C.
94 unsigned CC = CallingConv::CC_C;
// Integer registers still available for passing arguments.
95 unsigned FreeRegs = 0;
// SSE registers still available (vectorcall/regcall/HVA passing).
96 unsigned FreeSSERegs = 0;
// Which arguments are required (as opposed to variadic extras).
97 RequiredArgs Required;
98 bool IsDelegateCall =
false;
102class X86_32ABIInfo :
public ABIInfo {
108 static const unsigned MinABIStackAlignInBytes = 4;
110 bool IsDarwinVectorABI;
111 bool IsRetSmallStructInRegABI;
112 bool IsWin32StructABI;
116 unsigned DefaultNumRegisterParameters;
118 static bool isRegisterSize(
unsigned Size) {
119 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
122 bool isHomogeneousAggregateBaseType(QualType Ty)
const override {
124 return isX86VectorTypeForVectorCall(getContext(), Ty);
127 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
128 uint64_t NumMembers)
const override {
130 return isX86VectorCallAggregateSmallEnough(NumMembers);
133 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context)
const;
137 ABIArgInfo getIndirectResult(QualType Ty,
bool ByVal, CCState &State)
const;
139 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State)
const;
142 unsigned getTypeStackAlignInBytes(QualType Ty,
unsigned Align)
const;
144 Class classify(QualType Ty)
const;
147 unsigned ArgIndex)
const;
151 bool updateFreeRegs(QualType Ty, CCState &State)
const;
153 bool shouldAggregateUseDirect(QualType Ty, CCState &State,
bool &InReg,
154 bool &NeedsPadding)
const;
155 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State)
const;
157 bool canExpandIndirectArgument(QualType Ty)
const;
161 void rewriteWithInAlloca(CGFunctionInfo &FI)
const;
163 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
164 CharUnits &StackOffset, ABIArgInfo &Info,
165 QualType
Type)
const;
166 void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State)
const;
170 void computeInfo(CGFunctionInfo &FI)
const override;
171 RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
172 AggValueSlot Slot)
const override;
174 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT,
bool DarwinVectorABI,
175 bool RetSmallStructInRegABI,
bool Win32StructABI,
176 unsigned NumRegisterParameters,
bool SoftFloatABI)
177 : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
178 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
179 IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
180 IsMCUABI(CGT.getTarget().
getTriple().isOSIAMCU()),
181 IsLinuxABI(CGT.getTarget().
getTriple().isOSLinux() ||
182 CGT.getTarget().
getTriple().isOSCygMing()),
183 DefaultNumRegisterParameters(NumRegisterParameters) {}
188 explicit X86_32SwiftABIInfo(CodeGenTypes &CGT)
189 : SwiftABIInfo(CGT,
false) {}
192 bool AsReturnValue)
const override {
197 return occupiesMoreThan(ComponentTys, 3);
203 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
bool DarwinVectorABI,
204 bool RetSmallStructInRegABI,
bool Win32StructABI,
205 unsigned NumRegisterParameters,
bool SoftFloatABI)
206 : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
207 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
208 NumRegisterParameters, SoftFloatABI)) {
209 SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
212 static bool isStructReturnInRegABI(
213 const llvm::Triple &Triple,
const CodeGenOptions &Opts);
215 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
216 CodeGen::CodeGenModule &CGM)
const override;
218 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM)
const override {
224 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
225 llvm::Value *Address)
const override;
227 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
228 StringRef Constraint,
229 llvm::Type* Ty)
const override {
230 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
233 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
234 std::string &Constraints,
235 std::vector<llvm::Type *> &ResultRegTypes,
236 std::vector<llvm::Type *> &ResultTruncRegTypes,
237 std::vector<LValue> &ResultRegDests,
238 std::string &AsmString,
239 unsigned NumOutputs)
const override;
241 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
242 return "movl\t%ebp, %ebp"
243 "\t\t// marker for objc_retainAutoreleaseReturnValue";
259 std::string &AsmString) {
261 llvm::raw_string_ostream OS(Buf);
263 while (Pos < AsmString.size()) {
264 size_t DollarStart = AsmString.find(
'$', Pos);
265 if (DollarStart == std::string::npos)
266 DollarStart = AsmString.size();
267 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
268 if (DollarEnd == std::string::npos)
269 DollarEnd = AsmString.size();
270 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
272 size_t NumDollars = DollarEnd - DollarStart;
273 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
275 size_t DigitStart = Pos;
276 if (AsmString[DigitStart] ==
'{') {
280 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
281 if (DigitEnd == std::string::npos)
282 DigitEnd = AsmString.size();
283 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
284 unsigned OperandIndex;
285 if (!OperandStr.getAsInteger(10, OperandIndex)) {
286 if (OperandIndex >= FirstIn)
287 OperandIndex += NumNewOuts;
295 AsmString = std::move(Buf);
299void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
301 std::vector<llvm::Type *> &ResultRegTypes,
302 std::vector<llvm::Type *> &ResultTruncRegTypes,
303 std::vector<LValue> &ResultRegDests, std::string &AsmString,
304 unsigned NumOutputs)
const {
309 if (!Constraints.empty())
311 if (RetWidth <= 32) {
312 Constraints +=
"={eax}";
313 ResultRegTypes.push_back(CGF.
Int32Ty);
317 ResultRegTypes.push_back(CGF.
Int64Ty);
321 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.
getLLVMContext(), RetWidth);
322 ResultTruncRegTypes.push_back(CoerceTy);
325 ReturnSlot.setAddress(ReturnSlot.getAddress().withElementType(CoerceTy));
326 ResultRegDests.push_back(ReturnSlot);
333bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
334 ASTContext &Context)
const {
339 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
345 if (Size == 64 || Size == 128)
360 return shouldReturnTypeInRegister(AT->getElementType(), Context);
371 for (
const auto *FD : RD->fields()) {
377 if (!shouldReturnTypeInRegister(FD->getType(), Context))
386 Ty = CTy->getElementType();
395 uint64_t Size = Context.getTypeSize(Ty);
396 return Size == 32 || Size == 64;
401 for (
const auto *FD : RD->
fields()) {
411 if (FD->isBitField())
414 Size += Context.getTypeSize(FD->getType());
436bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty)
const {
442 if (
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
443 if (!IsWin32StructABI) {
446 if (!CXXRD->isCLike())
450 if (CXXRD->isDynamicClass())
461 return Size == getContext().getTypeSize(Ty);
464ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State)
const {
467 if (State.CC != llvm::CallingConv::X86_FastCall &&
468 State.CC != llvm::CallingConv::X86_VectorCall && State.FreeRegs) {
471 return getNaturalAlignIndirectInReg(RetTy);
473 return getNaturalAlignIndirect(
474 RetTy, getDataLayout().getAllocaAddrSpace(),
478ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
479 CCState &State)
const {
485 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
486 State.CC == llvm::CallingConv::X86_RegCall) &&
487 isHomogeneousAggregate(RetTy, Base, NumElts)) {
492 if (
const VectorType *VT = RetTy->
getAs<VectorType>()) {
494 if (IsDarwinVectorABI) {
502 llvm::Type::getInt64Ty(getVMContext()), 2));
506 if ((Size == 8 || Size == 16 || Size == 32) ||
507 (Size == 64 && VT->getNumElements() == 1))
511 return getIndirectReturnResult(RetTy, State);
521 return getIndirectReturnResult(RetTy, State);
525 return getIndirectReturnResult(RetTy, State);
532 if (
const ComplexType *CT = RetTy->
getAs<ComplexType>()) {
533 QualType ET = getContext().getCanonicalType(CT->getElementType());
536 llvm::Type::getHalfTy(getVMContext()), 2));
541 if (shouldReturnTypeInRegister(RetTy, getContext())) {
550 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
551 || SeltTy->hasPointerRepresentation())
559 return getIndirectReturnResult(RetTy, State);
564 RetTy = ED->getIntegerType();
566 if (
const auto *EIT = RetTy->
getAs<BitIntType>())
567 if (EIT->getNumBits() > 64)
568 return getIndirectReturnResult(RetTy, State);
574unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
575 unsigned Align)
const {
578 if (Align <= MinABIStackAlignInBytes)
586 if (Ty->
isVectorType() && (Align == 16 || Align == 32 || Align == 64))
590 if (!IsDarwinVectorABI) {
592 return MinABIStackAlignInBytes;
600 return MinABIStackAlignInBytes;
603ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty,
bool ByVal,
604 CCState &State)
const {
606 if (State.FreeRegs) {
609 return getNaturalAlignIndirectInReg(Ty);
611 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
616 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
617 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
621 getDataLayout().getAllocaAddrSpace(),
626 bool Realign = TypeAlign > StackAlign;
629 getDataLayout().getAllocaAddrSpace(),
true,
633X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty)
const {
638 if (
const BuiltinType *BT = T->
getAs<BuiltinType>()) {
640 if (K == BuiltinType::Float || K == BuiltinType::Double)
646bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State)
const {
647 if (!IsSoftFloatABI) {
653 unsigned Size = getContext().getTypeSize(Ty);
654 unsigned SizeInRegs = (
Size + 31) / 32;
660 if (SizeInRegs > State.FreeRegs) {
669 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
673 State.FreeRegs -= SizeInRegs;
677bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
679 bool &NeedsPadding)
const {
686 NeedsPadding =
false;
689 if (!updateFreeRegs(Ty, State))
695 if (State.CC == llvm::CallingConv::X86_FastCall ||
696 State.CC == llvm::CallingConv::X86_VectorCall ||
697 State.CC == llvm::CallingConv::X86_RegCall) {
698 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
707bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State)
const {
708 bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
712 if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
713 State.CC == llvm::CallingConv::X86_VectorCall))
716 if (!updateFreeRegs(Ty, State))
719 if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
726void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State)
const {
736 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.
arguments();
737 for (
int I = 0, E = Args.size(); I < E; ++I) {
740 const QualType &Ty = Args[I].type;
742 isHomogeneousAggregate(Ty, Base, NumElts)) {
743 if (State.FreeSSERegs >= NumElts) {
744 State.FreeSSERegs -= NumElts;
746 State.IsPreassigned.set(I);
752ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
753 unsigned ArgIndex)
const {
755 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
756 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
757 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
760 TypeInfo TI = getContext().getTypeInfo(Ty);
767 return getIndirectResult(Ty,
false, State);
768 }
else if (State.IsDelegateCall) {
771 ABIArgInfo Res = getIndirectResult(Ty,
false, State);
784 if ((IsRegCall || IsVectorCall) &&
785 isHomogeneousAggregate(Ty, Base, NumElts)) {
786 if (State.FreeSSERegs >= NumElts) {
787 State.FreeSSERegs -= NumElts;
792 return getDirectX86Hva();
800 return getIndirectResult(Ty,
false, State);
806 if (RT && RT->getDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
807 return getIndirectResult(Ty,
true, State);
810 if (!IsWin32StructABI &&
isEmptyRecord(getContext(), Ty,
true))
817 llvm::LLVMContext &LLVMContext = getVMContext();
818 llvm::IntegerType *
Int32 = llvm::Type::getInt32Ty(LLVMContext);
819 bool NeedsPadding =
false;
821 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
822 unsigned SizeInRegs = (TI.
Width + 31) / 32;
823 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
824 llvm::Type *
Result = llvm::StructType::get(LLVMContext, Elements);
830 llvm::IntegerType *PaddingType = NeedsPadding ?
Int32 :
nullptr;
837 if (IsWin32StructABI && State.Required.
isRequiredArg(ArgIndex)) {
838 unsigned AlignInBits = 0;
840 const ASTRecordLayout &Layout =
841 getContext().getASTRecordLayout(RT->getDecl());
844 AlignInBits = TI.
Align;
846 if (AlignInBits > 32)
847 return getIndirectResult(Ty,
false, State);
856 if (TI.
Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
857 canExpandIndirectArgument(Ty))
859 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
861 return getIndirectResult(Ty,
true, State);
864 if (
const VectorType *VT = Ty->
getAs<VectorType>()) {
868 if (IsWin32StructABI) {
869 if (TI.
Width <= 512 && State.FreeSSERegs > 0) {
873 return getIndirectResult(Ty,
false, State);
878 if (IsDarwinVectorABI) {
880 (TI.
Width == 64 && VT->getNumElements() == 1))
882 llvm::IntegerType::get(getVMContext(), TI.
Width));
885 if (IsX86_MMXType(CGT.ConvertType(Ty)))
892 Ty = ED->getIntegerType();
894 bool InReg = shouldPrimitiveUseInReg(Ty, State);
896 if (isPromotableIntegerTypeForABI(Ty)) {
902 if (
const auto *EIT = Ty->
getAs<BitIntType>()) {
903 if (EIT->getNumBits() <= 64) {
908 return getIndirectResult(Ty,
false, State);
916void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI)
const {
920 else if (State.CC == llvm::CallingConv::X86_FastCall) {
922 State.FreeSSERegs = 3;
923 }
else if (State.CC == llvm::CallingConv::X86_VectorCall) {
925 State.FreeSSERegs = 6;
928 else if (State.CC == llvm::CallingConv::X86_RegCall) {
930 State.FreeSSERegs = 8;
931 }
else if (IsWin32StructABI) {
934 State.FreeRegs = DefaultNumRegisterParameters;
935 State.FreeSSERegs = 3;
937 State.FreeRegs = DefaultNumRegisterParameters;
944 if (State.FreeRegs) {
957 if (State.CC == llvm::CallingConv::X86_VectorCall)
958 runVectorCallFirstPass(FI, State);
960 bool UsedInAlloca =
false;
961 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.
arguments();
962 for (
unsigned I = 0, E = Args.size(); I < E; ++I) {
964 if (State.IsPreassigned.test(I))
975 rewriteWithInAlloca(FI);
979X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
980 CharUnits &StackOffset, ABIArgInfo &Info,
981 QualType
Type)
const {
984 assert(StackOffset.
isMultipleOf(WordSize) &&
"unaligned inalloca struct");
989 bool IsIndirect =
false;
993 llvm::Type *LLTy = CGT.ConvertTypeForMem(
Type);
995 LLTy = llvm::PointerType::getUnqual(getVMContext());
996 FrameFields.push_back(LLTy);
997 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(
Type);
1000 CharUnits FieldEnd = StackOffset;
1001 StackOffset = FieldEnd.
alignTo(WordSize);
1002 if (StackOffset != FieldEnd) {
1003 CharUnits NumBytes = StackOffset - FieldEnd;
1004 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1005 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
1006 FrameFields.push_back(Ty);
1029 llvm_unreachable(
"invalid enum");
1032void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI)
const {
1033 assert(IsWin32StructABI &&
"inalloca only supported on win32");
1036 SmallVector<llvm::Type *, 6> FrameFields;
1041 CharUnits StackOffset;
1048 if (
Ret.isIndirect() &&
Ret.isSRetAfterThis() && !IsThisCall &&
1050 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1055 if (
Ret.isIndirect() && !
Ret.getInReg()) {
1056 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.
getReturnType());
1058 Ret.setInAllocaSRet(IsWin32StructABI);
1066 for (; I != E; ++I) {
1068 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1071 FI.
setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1076RValue X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1077 QualType Ty, AggValueSlot Slot)
const {
1079 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1081 CCState State(*
const_cast<CGFunctionInfo *
>(CGF.
CurFnInfo));
1092 getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
1099bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1100 const llvm::Triple &Triple,
const CodeGenOptions &Opts) {
1101 assert(Triple.getArch() == llvm::Triple::x86);
1103 switch (Opts.getStructReturnConvention()) {
1112 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1115 switch (Triple.getOS()) {
1116 case llvm::Triple::DragonFly:
1117 case llvm::Triple::FreeBSD:
1118 case llvm::Triple::OpenBSD:
1119 case llvm::Triple::Win32:
1128 if (!FD->
hasAttr<AnyX86InterruptAttr>())
1132 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1138 llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
1139 Fn->getContext(), ByValTy);
1140 Fn->addParamAttr(0, NewAttr);
1143void X86_32TargetCodeGenInfo::setTargetAttributes(
1144 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM)
const {
1145 if (GV->isDeclaration())
1147 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1148 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1150 Fn->addFnAttr(
"stackrealign");
1157bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1158 CodeGen::CodeGenFunction &CGF,
1159 llvm::Value *Address)
const {
1160 CodeGen::CGBuilderTy &Builder = CGF.
Builder;
1162 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.
Int8Ty, 4);
1173 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.
Int8Ty, 16);
1179 Builder.CreateAlignedStore(
1180 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty, Address, 9),
1186 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.
Int8Ty, 12);
1201static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
1203 case X86AVXABILevel::AVX512:
1205 case X86AVXABILevel::AVX:
1207 case X86AVXABILevel::None:
1210 llvm_unreachable(
"Unknown AVXLevel");
1214class X86_64ABIInfo :
public ABIInfo {
1251 void postMerge(
unsigned AggregateSize,
Class &Lo,
Class &Hi)
const;
1279 void classify(QualType T, uint64_t OffsetBase,
Class &Lo,
Class &Hi,
1280 bool isNamedArg,
bool IsRegCall =
false)
const;
1282 llvm::Type *GetByteVectorType(QualType Ty)
const;
1283 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1284 unsigned IROffset, QualType SourceTy,
1285 unsigned SourceOffset)
const;
1286 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1287 unsigned IROffset, QualType SourceTy,
1288 unsigned SourceOffset)
const;
1292 ABIArgInfo getIndirectReturnResult(QualType Ty)
const;
1299 ABIArgInfo getIndirectResult(QualType Ty,
unsigned freeIntRegs)
const;
1304 unsigned &neededInt,
unsigned &neededSSE,
1306 bool IsRegCall =
false)
const;
1308 ABIArgInfo classifyRegCallStructType(QualType Ty,
unsigned &NeededInt,
1309 unsigned &NeededSSE,
1310 unsigned &MaxVectorWidth)
const;
1312 bool passRegCallStructTypeDirectly(QualType Ty,
1313 SmallVectorImpl<llvm::Type *> &CoerceElts,
1314 unsigned &NeededInt,
unsigned &NeededSSE,
1315 unsigned &MaxVectorWidth)
const;
1317 bool IsIllegalVectorType(QualType Ty)
const;
1324 bool honorsRevision0_98()
const {
1325 return !getTarget().getTriple().isOSDarwin();
1330 bool classifyIntegerMMXAsSSE()
const {
1332 if (getContext().getLangOpts().getClangABICompat() <=
1333 LangOptions::ClangABI::Ver3_8)
1336 const llvm::Triple &Triple = getTarget().getTriple();
1337 if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
1343 bool passInt128VectorsInMem()
const {
1345 if (getContext().getLangOpts().getClangABICompat() <=
1346 LangOptions::ClangABI::Ver9)
1349 const llvm::Triple &T = getTarget().getTriple();
1350 return T.isOSLinux() || T.isOSNetBSD();
1353 bool returnCXXRecordGreaterThan128InMem()
const {
1355 if (getContext().getLangOpts().getClangABICompat() <=
1356 LangOptions::ClangABI::Ver20 ||
1366 bool Has64BitPointers;
1369 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT,
X86AVXABILevel AVXLevel)
1370 : ABIInfo(CGT), AVXLevel(AVXLevel),
1371 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}
1373 bool isPassedUsingAVXType(QualType
type)
const {
1374 unsigned neededInt, neededSSE;
1380 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1381 return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
1386 void computeInfo(CGFunctionInfo &FI)
const override;
1388 RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
1389 AggValueSlot Slot)
const override;
1390 RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
1391 AggValueSlot Slot)
const override;
// Accessor: true when pointers in address space 0 are 8 bytes wide
// (captured from the DataLayout when this ABIInfo was constructed).
1393 bool has64BitPointers()
const {
1394 return Has64BitPointers;
1399class WinX86_64ABIInfo :
public ABIInfo {
1401 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT,
X86AVXABILevel AVXLevel)
1402 : ABIInfo(CGT), AVXLevel(AVXLevel),
1403 IsMingw64(getTarget().
getTriple().isWindowsGNUEnvironment()) {}
1405 void computeInfo(CGFunctionInfo &FI)
const override;
1407 RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
1408 AggValueSlot Slot)
const override;
1410 bool isHomogeneousAggregateBaseType(QualType Ty)
const override {
1412 return isX86VectorTypeForVectorCall(getContext(), Ty);
1415 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
1416 uint64_t NumMembers)
const override {
1418 return isX86VectorCallAggregateSmallEnough(NumMembers);
1421 ABIArgInfo classifyArgForArm64ECVarArg(QualType Ty)
const override {
1422 unsigned FreeSSERegs = 0;
1423 return classify(Ty, FreeSSERegs,
false,
1428 ABIArgInfo classify(QualType Ty,
unsigned &FreeSSERegs,
bool IsReturnType,
1429 bool IsVectorCall,
bool IsRegCall)
const;
1430 ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty,
unsigned &FreeSSERegs,
1431 const ABIArgInfo ¤t)
const;
1438class X86_64TargetCodeGenInfo :
public TargetCodeGenInfo {
1440 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
X86AVXABILevel AVXLevel)
1441 : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {
1443 std::make_unique<SwiftABIInfo>(CGT,
true);
1448 bool markARCOptimizedReturnCallsAsNoTail()
const override {
return true; }
1450 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM)
const override {
1454 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1455 llvm::Value *Address)
const override {
1456 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.
Int8Ty, 8);
1464 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1465 StringRef Constraint,
1466 llvm::Type* Ty)
const override {
1467 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1470 bool isNoProtoCallVariadic(
const CallArgList &args,
1471 const FunctionNoProtoType *fnType)
const override {
1479 bool HasAVXType =
false;
1480 for (
const CallArg &arg : args) {
1481 if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(
arg.Ty)) {
1494 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1495 CodeGen::CodeGenModule &CGM)
const override {
1496 if (GV->isDeclaration())
1498 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1499 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1501 Fn->addFnAttr(
"stackrealign");
1508 void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
1509 const FunctionDecl *Caller,
1510 const FunctionDecl *Callee,
const CallArgList &Args,
1511 QualType ReturnType)
const override;
1516 llvm::StringMap<bool> &CallerMap,
1518 llvm::StringMap<bool> &CalleeMap,
1520 if (CalleeMap.empty() && CallerMap.empty()) {
1532 const llvm::StringMap<bool> &CallerMap,
1533 const llvm::StringMap<bool> &CalleeMap,
1536 bool CallerHasFeat = CallerMap.lookup(
Feature);
1537 bool CalleeHasFeat = CalleeMap.lookup(
Feature);
1539 if (!CallerHasFeat && !CalleeHasFeat &&
1540 (!Callee.isExternallyVisible() || Callee.hasAttr<AlwaysInlineAttr>()))
1543 if (!CallerHasFeat && !CalleeHasFeat)
1544 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
1545 << IsArgument << Ty <<
Feature;
1548 if (!CallerHasFeat || !CalleeHasFeat)
1549 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1550 << IsArgument << Ty <<
Feature;
1559 const llvm::StringMap<bool> &CallerMap,
1560 const llvm::StringMap<bool> &CalleeMap,
QualType Ty,
1565 "avx512f", IsArgument);
1574void X86_64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
1575 SourceLocation CallLoc,
1576 const FunctionDecl *Caller,
1577 const FunctionDecl *Callee,
1578 const CallArgList &Args,
1579 QualType ReturnType)
const {
1583 llvm::StringMap<bool> CallerMap;
1584 llvm::StringMap<bool> CalleeMap;
1585 unsigned ArgIndex = 0;
1589 for (
const CallArg &Arg : Args) {
1597 if (Arg.getType()->isVectorType() &&
1600 QualType Ty = Arg.getType();
1603 if (ArgIndex < Callee->getNumParams())
1604 Ty =
Callee->getParamDecl(ArgIndex)->getType();
1607 CallerMap, CalleeMap, Ty,
true))
1615 if (
Callee->getReturnType()->isVectorType() &&
1619 CalleeMap,
Callee->getReturnType(),
1628 bool Quote = Lib.contains(
' ');
1629 std::string ArgStr = Quote ?
"\"" :
"";
1631 if (!Lib.ends_with_insensitive(
".lib") && !Lib.ends_with_insensitive(
".a"))
1633 ArgStr += Quote ?
"\"" :
"";
1638class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
1641 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
1642 unsigned NumRegisterParameters)
1643 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
1644 Win32StructABI, NumRegisterParameters,
false) {}
1646 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1649 void getDependentLibraryOption(llvm::StringRef Lib,
1651 Opt =
"/DEFAULTLIB:";
1652 Opt += qualifyWindowsLibrary(Lib);
1655 void getDetectMismatchOption(llvm::StringRef Name,
1656 llvm::StringRef
Value,
1658 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" +
Value.str() +
"\"";
1663void WinX86_32TargetCodeGenInfo::setTargetAttributes(
1664 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM)
const {
1665 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
1666 if (GV->isDeclaration())
1668 addStackProbeTargetAttributes(D, GV, CGM);
1672class WinX86_64TargetCodeGenInfo :
public TargetCodeGenInfo {
1674 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
1676 : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {
1678 std::make_unique<SwiftABIInfo>(CGT,
true);
1681 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1682 CodeGen::CodeGenModule &CGM)
const override;
1684 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM)
const override {
1688 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1689 llvm::Value *Address)
const override {
1690 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.
Int8Ty, 8);
1698 void getDependentLibraryOption(llvm::StringRef Lib,
1699 llvm::SmallString<24> &Opt)
const override {
1700 Opt =
"/DEFAULTLIB:";
1701 Opt += qualifyWindowsLibrary(Lib);
1704 void getDetectMismatchOption(llvm::StringRef Name,
1705 llvm::StringRef
Value,
1706 llvm::SmallString<32> &Opt)
const override {
1707 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" +
Value.str() +
"\"";
1712void WinX86_64TargetCodeGenInfo::setTargetAttributes(
1713 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM)
const {
1715 if (GV->isDeclaration())
1717 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1718 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1720 Fn->addFnAttr(
"stackrealign");
1726 addStackProbeTargetAttributes(D, GV, CGM);
1729void X86_64ABIInfo::postMerge(
unsigned AggregateSize,
Class &Lo,
1754 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1756 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1758 if (Hi == SSEUp && Lo != SSE)
// Merge one field's eightbyte classification into the accumulated class,
// following the AMD64 psABI merge rules for aggregate classification.
// NOTE(review): the `return` consequents of the conditions below are elided
// in this extract — the comments describe the conditions' intent only.
1762X86_64ABIInfo::Class X86_64ABIInfo::merge(
Class Accum,
Class Field) {
// Memory/ComplexX87 accumulations should have short-circuited before merging.
1786 assert((Accum != Memory && Accum != ComplexX87) &&
1787 "Invalid accumulated classification during merge.")
1788 if (Accum == Field || Field == NoClass)
1790 if (Field == Memory)
1792 if (Accum == NoClass)
// Any x87-related class on either side of the merge (psABI: aggregates
// containing x87 data are passed in memory).
1796 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1797 Accum == X87 || Accum == X87Up)
1802void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Class &Lo,
1803 Class &Hi,
bool isNamedArg,
bool IsRegCall)
const {
1814 Class &Current = OffsetBase < 64 ? Lo : Hi;
1817 if (
const BuiltinType *BT = Ty->
getAs<BuiltinType>()) {
1820 if (k == BuiltinType::Void) {
1822 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1825 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1827 }
else if (k == BuiltinType::Float || k == BuiltinType::Double ||
1828 k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
1830 }
else if (k == BuiltinType::Float128) {
1833 }
else if (k == BuiltinType::LongDouble) {
1834 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1835 if (LDF == &llvm::APFloat::IEEEquad()) {
1838 }
else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
1841 }
else if (LDF == &llvm::APFloat::IEEEdouble()) {
1844 llvm_unreachable(
"unexpected long double representation!");
1853 classify(ED->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
1864 if (Has64BitPointers) {
1871 uint64_t EB_FuncPtr = (OffsetBase) / 64;
1872 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
1873 if (EB_FuncPtr != EB_ThisAdj) {
1885 if (
const VectorType *VT = Ty->
getAs<VectorType>()) {
1887 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
1896 uint64_t EB_Lo = (OffsetBase) / 64;
1900 }
else if (Size == 64) {
1901 QualType ElementType = VT->getElementType();
1910 if (!classifyIntegerMMXAsSSE() &&
1921 if (OffsetBase && OffsetBase != 64)
1923 }
else if (Size == 128 ||
1924 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
1925 QualType ElementType = VT->getElementType();
1928 if (passInt128VectorsInMem() && Size != 128 &&
1953 if (
const ComplexType *CT = Ty->
getAs<ComplexType>()) {
1960 else if (Size <= 128)
1962 }
else if (ET->
isFloat16Type() || ET == getContext().FloatTy ||
1965 }
else if (ET == getContext().DoubleTy) {
1967 }
else if (ET == getContext().LongDoubleTy) {
1968 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1969 if (LDF == &llvm::APFloat::IEEEquad())
1971 else if (LDF == &llvm::APFloat::x87DoubleExtended())
1972 Current = ComplexX87;
1973 else if (LDF == &llvm::APFloat::IEEEdouble())
1976 llvm_unreachable(
"unexpected long double representation!");
1981 uint64_t EB_Real = (OffsetBase) / 64;
1982 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1983 if (Hi == NoClass && EB_Real != EB_Imag)
1989 if (
const auto *EITy = Ty->
getAs<BitIntType>()) {
1990 if (EITy->getNumBits() <= 64)
1992 else if (EITy->getNumBits() <= 128)
1998 if (
const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2007 if (!IsRegCall && Size > 512)
2014 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2020 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2021 uint64_t ArraySize = AT->getZExtSize();
2028 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2031 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2032 Class FieldLo, FieldHi;
2033 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2034 Lo =
merge(Lo, FieldLo);
2035 Hi =
merge(Hi, FieldHi);
2036 if (Lo == Memory || Hi == Memory)
2040 postMerge(Size, Lo, Hi);
2041 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2059 const RecordDecl *RD = RT->getDecl()->getDefinitionOrSelf();
2065 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2071 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2072 for (
const auto &I : CXXRD->bases()) {
2073 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2074 "Unexpected base class!");
2075 const auto *
Base = I.getType()->castAsCXXRecordDecl();
2081 Class FieldLo, FieldHi;
2084 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2085 Lo =
merge(Lo, FieldLo);
2086 Hi =
merge(Hi, FieldHi);
2087 if (returnCXXRecordGreaterThan128InMem() &&
2088 (Size > 128 && (Size != getContext().getTypeSize(I.getType()) ||
2089 Size > getNativeVectorSizeForAVXABI(AVXLevel)))) {
2094 if (Lo == Memory || Hi == Memory) {
2095 postMerge(Size, Lo, Hi);
2103 bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
2104 LangOptions::ClangABI::Ver11 ||
2105 getContext().getTargetInfo().getTriple().isPS();
2106 bool IsUnion = RT->isUnionType() && !UseClang11Compat;
2109 i != e; ++i, ++idx) {
2111 bool BitField = i->isBitField();
2114 if (BitField && i->isUnnamedBitField())
2127 ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
2128 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2130 postMerge(Size, Lo, Hi);
2135 Offset % getContext().getTypeAlign(i->getType().getCanonicalType());
2137 if (!BitField && IsInMemory) {
2139 postMerge(Size, Lo, Hi);
2149 Class FieldLo, FieldHi;
2155 assert(!i->isUnnamedBitField());
2163 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2168 FieldHi = EB_Hi ?
Integer : NoClass;
2171 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2172 Lo =
merge(Lo, FieldLo);
2173 Hi =
merge(Hi, FieldHi);
2174 if (Lo == Memory || Hi == Memory)
2178 postMerge(Size, Lo, Hi);
2182ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty)
const {
2188 Ty = ED->getIntegerType();
2191 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());
2193 llvm::Type *IRTy = CGT.ConvertType(Ty);
2198 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());
2201bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty)
const {
2202 if (
const VectorType *VecTy = Ty->
getAs<VectorType>()) {
2204 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2205 if (Size <= 64 || Size > LargestVector)
2207 QualType EltTy = VecTy->getElementType();
2208 if (passInt128VectorsInMem() &&
2217ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2218 unsigned freeIntRegs)
const {
2231 Ty = ED->getIntegerType();
2233 llvm::Type *IRTy = CGT.ConvertType(Ty);
2239 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
2244 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2267 if (freeIntRegs == 0) {
2272 if (Align == 8 && Size <= 64)
2278 getDataLayout().getAllocaAddrSpace());
2283llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty)
const {
2287 Ty = QualType(InnerTy, 0);
2289 llvm::Type *IRType = CGT.ConvertType(Ty);
2293 if (passInt128VectorsInMem() &&
2297 return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
2304 if (IRType->getTypeID() == llvm::Type::FP128TyID)
2309 assert((Size == 128 || Size == 256 || Size == 512) &&
"Invalid type found!");
2313 return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2329 unsigned TySize = (
unsigned)Context.getTypeSize(Ty);
2330 if (TySize <= StartBit)
2334 unsigned EltSize = (
unsigned)Context.getTypeSize(AT->getElementType());
2335 unsigned NumElts = (
unsigned)AT->getZExtSize();
2338 for (
unsigned i = 0; i != NumElts; ++i) {
2340 unsigned EltOffset = i*EltSize;
2341 if (EltOffset >= EndBit)
break;
2343 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2345 EndBit-EltOffset, Context))
2356 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2357 for (
const auto &I : CXXRD->bases()) {
2358 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2359 "Unexpected base class!");
2360 const auto *
Base = I.getType()->castAsCXXRecordDecl();
2364 if (BaseOffset >= EndBit)
continue;
2366 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2368 EndBit-BaseOffset, Context))
2379 i != e; ++i, ++idx) {
2383 if (FieldOffset >= EndBit)
break;
2385 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2401 const llvm::DataLayout &TD) {
2402 if (IROffset == 0 && IRType->isFloatingPointTy())
2406 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2407 if (!STy->getNumContainedTypes())
2410 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2411 unsigned Elt = SL->getElementContainingOffset(IROffset);
2412 IROffset -= SL->getElementOffset(Elt);
2417 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2418 llvm::Type *EltTy = ATy->getElementType();
2419 unsigned EltSize = TD.getTypeAllocSize(EltTy);
2420 IROffset -= IROffset / EltSize * EltSize;
2429llvm::Type *X86_64ABIInfo::
2430GetSSETypeAtOffset(llvm::Type *IRType,
unsigned IROffset,
2431 QualType SourceTy,
unsigned SourceOffset)
const {
2432 const llvm::DataLayout &TD = getDataLayout();
2433 unsigned SourceSize =
2434 (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
2436 if (!T0 || T0->isDoubleTy())
2437 return llvm::Type::getDoubleTy(getVMContext());
2440 llvm::Type *T1 =
nullptr;
2441 unsigned T0Size = TD.getTypeAllocSize(T0);
2442 if (SourceSize > T0Size)
2444 if (T1 ==
nullptr) {
2447 if (T0->is16bitFPTy() && SourceSize > 4)
2456 if (T0->isFloatTy() && T1->isFloatTy())
2457 return llvm::FixedVectorType::get(T0, 2);
2459 if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
2460 llvm::Type *T2 =
nullptr;
2464 return llvm::FixedVectorType::get(T0, 2);
2465 return llvm::FixedVectorType::get(T0, 4);
2468 if (T0->is16bitFPTy() || T1->is16bitFPTy())
2469 return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);
2471 return llvm::Type::getDoubleTy(getVMContext());
2488llvm::Type *X86_64ABIInfo::
2489GetINTEGERTypeAtOffset(llvm::Type *IRType,
unsigned IROffset,
2490 QualType SourceTy,
unsigned SourceOffset)
const {
2493 if (IROffset == 0) {
2496 IRType->isIntegerTy(64))
2505 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2506 IRType->isIntegerTy(32) ||
2512 SourceOffset*8+64, getContext()))
2517 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2519 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2520 if (IROffset < SL->getSizeInBytes()) {
2521 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2522 IROffset -= SL->getElementOffset(FieldIdx);
2524 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2525 SourceTy, SourceOffset);
2529 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2530 llvm::Type *EltTy = ATy->getElementType();
2531 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2532 unsigned EltOffset = IROffset/EltSize*EltSize;
2533 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2539 if (IRType->isIntegerTy(128)) {
2540 assert(IROffset == 0);
2546 unsigned TySizeInBytes =
2547 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2549 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
2553 return llvm::IntegerType::get(getVMContext(),
2554 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2565 const llvm::DataLayout &TD) {
2570 unsigned LoSize = (
unsigned)TD.getTypeAllocSize(Lo);
2571 llvm::Align HiAlign = TD.getABITypeAlign(Hi);
2572 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
2573 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
2584 if (Lo->isHalfTy() || Lo->isFloatTy())
2585 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2587 assert((Lo->isIntegerTy() || Lo->isPointerTy())
2588 &&
"Invalid/unknown lo type");
2589 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2593 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
2596 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2597 "Invalid x86-64 argument pair!");
2601ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy)
const {
2604 X86_64ABIInfo::Class Lo, Hi;
2605 classify(RetTy, 0, Lo, Hi,
true);
2608 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
2609 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
2611 llvm::Type *ResType =
nullptr;
2618 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
2619 "Unknown missing lo part");
2624 llvm_unreachable(
"Invalid classification for lo word.");
2629 return getIndirectReturnResult(RetTy);
2634 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2641 RetTy = ED->getIntegerType();
2644 isPromotableIntegerTypeForABI(RetTy))
2648 if (ResType->isIntegerTy(128)) {
2658 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2664 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2671 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
2672 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2673 llvm::Type::getX86_FP80Ty(getVMContext()));
2677 llvm::Type *HighPart =
nullptr;
2683 llvm_unreachable(
"Invalid classification for hi word.");
2690 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2695 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2706 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
2707 ResType = GetByteVectorType(RetTy);
2718 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2735X86_64ABIInfo::classifyArgumentType(QualType Ty,
unsigned freeIntRegs,
2736 unsigned &neededInt,
unsigned &neededSSE,
2737 bool isNamedArg,
bool IsRegCall)
const {
2740 X86_64ABIInfo::Class Lo, Hi;
2741 classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
2745 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
2746 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
2750 llvm::Type *ResType =
nullptr;
2757 assert((Hi == SSE || Hi ==
Integer || Hi == X87Up) &&
2758 "Unknown missing lo part");
2771 return getIndirectResult(Ty, freeIntRegs);
2775 llvm_unreachable(
"Invalid classification for lo word.");
2784 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2791 Ty = ED->getIntegerType();
2794 isPromotableIntegerTypeForABI(Ty))
2798 if (ResType->isIntegerTy(128)) {
2809 llvm::Type *IRType = CGT.ConvertType(Ty);
2810 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2816 llvm::Type *HighPart =
nullptr;
2824 llvm_unreachable(
"Invalid classification for hi word.");
2826 case NoClass:
break;
2831 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2842 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2852 assert(Lo == SSE &&
"Unexpected SSEUp classification");
2853 ResType = GetByteVectorType(Ty);
2870bool X86_64ABIInfo::passRegCallStructTypeDirectly(
2871 QualType Ty, SmallVectorImpl<llvm::Type *> &CoerceElts,
unsigned &NeededInt,
2872 unsigned &NeededSSE,
unsigned &MaxVectorWidth)
const {
2880 if (
auto CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2881 if (CXXRD->isDynamicClass())
2884 for (
const auto &I : CXXRD->bases()) {
2885 QualType BaseTy = I.getType();
2888 if (!passRegCallStructTypeDirectly(BaseTy, CoerceElts, NeededInt,
2889 NeededSSE, MaxVectorWidth))
2895 for (
const auto *FD : RD->
fields()) {
2896 QualType MTy = FD->getType();
2900 if (!passRegCallStructTypeDirectly(MTy, CoerceElts, NeededInt, NeededSSE,
2906 const auto *AT = getContext().getAsConstantArrayType(MTy);
2908 MTy = AT->getElementType();
2910 unsigned LocalNeededInt, LocalNeededSSE;
2912 LocalNeededSSE,
true,
true);
2919 assert(CoerceTy &&
"ABI info for struct member has no coerce type");
2921 uint64_t NumElts = AT->getZExtSize();
2922 LocalNeededInt *= NumElts;
2923 LocalNeededSSE *= NumElts;
2924 CoerceElts.push_back(llvm::ArrayType::get(CoerceTy, NumElts));
2926 CoerceElts.push_back(CoerceTy);
2929 if (
const auto *VT = MTy->
getAs<VectorType>())
2930 if (getContext().getTypeSize(VT) > MaxVectorWidth)
2931 MaxVectorWidth = getContext().getTypeSize(VT);
2933 NeededInt += LocalNeededInt;
2934 NeededSSE += LocalNeededSSE;
2941X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
unsigned &NeededInt,
2942 unsigned &NeededSSE,
2943 unsigned &MaxVectorWidth)
const {
2951 SmallVector<llvm::Type *, 16> CoerceElts;
2952 if (!passRegCallStructTypeDirectly(Ty, CoerceElts, NeededInt, NeededSSE,
2954 NeededInt = NeededSSE = 0;
2955 return getIndirectReturnResult(Ty);
2958 assert(!CoerceElts.empty() &&
"Non-empty struct produced no element types");
2960 llvm::StructType::get(getVMContext(), CoerceElts));
2963void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI)
const {
2970 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
2971 Win64ABIInfo.computeInfo(FI);
2975 bool IsRegCall =
CallingConv == llvm::CallingConv::X86_RegCall;
2978 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
2979 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
2980 unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
2987 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
2988 FreeIntRegs -= NeededInt;
2989 FreeSSERegs -= NeededSSE;
2995 ->
getAs<ComplexType>()
2996 ->getElementType()) ==
2997 getContext().LongDoubleTy)
3009 else if (NeededSSE && MaxVectorWidth > 0)
3025 it != ie; ++it, ++ArgNo) {
3026 bool IsNamedArg = ArgNo < NumRequiredArgs;
3028 if (IsRegCall && it->type->isStructureOrClassType())
3029 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
3033 NeededSSE, IsNamedArg);
3039 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3040 FreeIntRegs -= NeededInt;
3041 FreeSSERegs -= NeededSSE;
3045 it->info = getIndirectResult(it->type, FreeIntRegs);
3054 llvm::Value *overflow_arg_area =
3069 llvm::Value *Res = overflow_arg_area;
3076 uint64_t SizeInBytes = (CGF.
getContext().getTypeSize(Ty) + 7) / 8;
3077 llvm::Value *Offset =
3078 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3080 Offset,
"overflow_arg_area.next");
3084 return Address(Res, LTy, Align);
3087RValue X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3088 QualType Ty, AggValueSlot Slot)
const {
3096 unsigned neededInt, neededSSE;
3108 if (!neededInt && !neededSSE)
3124 llvm::Value *InRegs =
nullptr;
3126 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3130 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3131 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3137 llvm::Value *FitsInFP =
3138 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3139 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3140 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3146 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3167 if (neededInt && neededSSE) {
3169 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3173 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3174 llvm::Type *TyLo = ST->getElementType(0);
3175 llvm::Type *TyHi = ST->getElementType(1);
3176 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3177 "Unexpected ABI info for mixed regs");
3178 llvm::Value *GPAddr =
3180 llvm::Value *FPAddr =
3182 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3183 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3199 }
else if (neededInt || neededSSE == 1) {
3201 auto TInfo = getContext().getTypeInfoInChars(Ty);
3202 uint64_t TySize = TInfo.Width.getQuantity();
3203 CharUnits TyAlign = TInfo.Align;
3204 llvm::Type *CoTy =
nullptr;
3208 llvm::Value *GpOrFpOffset = neededInt ? gp_offset : fp_offset;
3209 uint64_t Alignment = neededInt ? 8 : 16;
3210 uint64_t RegSize = neededInt ? neededInt * 8 : 16;
3235 llvm::Value *PtrOffset =
3257 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3290 llvm::Value *Offset = llvm::ConstantInt::get(CGF.
Int32Ty, neededInt * 8);
3295 llvm::Value *Offset = llvm::ConstantInt::get(CGF.
Int32Ty, neededSSE * 16);
3314RValue X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3315 QualType Ty, AggValueSlot Slot)
const {
3318 uint64_t Width = getContext().getTypeSize(Ty);
3319 bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3327ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
3328 QualType Ty,
unsigned &FreeSSERegs,
const ABIArgInfo ¤t)
const {
3333 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3334 FreeSSERegs -= NumElts;
3335 return getDirectX86Hva();
3340ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty,
unsigned &FreeSSERegs,
3341 bool IsReturnType,
bool IsVectorCall,
3342 bool IsRegCall)
const {
3348 Ty = ED->getIntegerType();
3350 TypeInfo Info = getContext().getTypeInfo(Ty);
3352 CharUnits Align = getContext().toCharUnitsFromBits(Info.
Align);
3356 if (!IsReturnType) {
3358 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
3362 if (RT->getDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
3363 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
3371 if ((IsVectorCall || IsRegCall) &&
3372 isHomogeneousAggregate(Ty, Base, NumElts)) {
3374 if (FreeSSERegs >= NumElts) {
3375 FreeSSERegs -= NumElts;
3381 Align, getDataLayout().getAllocaAddrSpace(),
3383 }
else if (IsVectorCall) {
3384 if (FreeSSERegs >= NumElts &&
3386 FreeSSERegs -= NumElts;
3388 }
else if (IsReturnType) {
3393 Align, getDataLayout().getAllocaAddrSpace(),
3402 llvm::Type *LLTy = CGT.ConvertType(Ty);
3403 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3410 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3411 return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
3418 if (
const BuiltinType *BT = Ty->
getAs<BuiltinType>()) {
3419 switch (BT->getKind()) {
3420 case BuiltinType::Bool:
3425 case BuiltinType::LongDouble:
3429 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3430 if (LDF == &llvm::APFloat::x87DoubleExtended())
3432 Align, getDataLayout().getAllocaAddrSpace(),
3437 case BuiltinType::Int128:
3438 case BuiltinType::UInt128:
3439 case BuiltinType::Float128:
3447 Align, getDataLayout().getAllocaAddrSpace(),
3455 llvm::Type::getInt64Ty(getVMContext()), 2));
3471 Align, getDataLayout().getAllocaAddrSpace(),
3478void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI)
const {
3480 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
3481 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
3485 if (CC == llvm::CallingConv::X86_64_SysV) {
3486 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
3487 SysVABIInfo.computeInfo(FI);
3491 unsigned FreeSSERegs = 0;
3495 }
else if (IsRegCall) {
3502 IsVectorCall, IsRegCall);
3507 }
else if (IsRegCall) {
3512 unsigned ArgNum = 0;
3513 unsigned ZeroSSERegs = 0;
3518 unsigned *MaybeFreeSSERegs =
3519 (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
3521 classify(I.
type, *MaybeFreeSSERegs,
false, IsVectorCall, IsRegCall);
3529 I.
info = reclassifyHvaArgForVectorCall(I.
type, FreeSSERegs, I.
info);
3533RValue WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3534 QualType Ty, AggValueSlot Slot)
const {
3537 uint64_t Width = getContext().getTypeSize(Ty);
3538 bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3547 CodeGenModule &CGM,
bool DarwinVectorABI,
bool Win32StructABI,
3548 unsigned NumRegisterParameters,
bool SoftFloatABI) {
3549 bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3551 return std::make_unique<X86_32TargetCodeGenInfo>(
3552 CGM.
getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3553 NumRegisterParameters, SoftFloatABI);
3557 CodeGenModule &CGM,
bool DarwinVectorABI,
bool Win32StructABI,
3558 unsigned NumRegisterParameters) {
3559 bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3561 return std::make_unique<WinX86_32TargetCodeGenInfo>(
3562 CGM.
getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3563 NumRegisterParameters);
3566std::unique_ptr<TargetCodeGenInfo>
3569 return std::make_unique<X86_64TargetCodeGenInfo>(CGM.
getTypes(), AVXLevel);
3572std::unique_ptr<TargetCodeGenInfo>
3575 return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.
getTypes(), AVXLevel);
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
static bool checkAVXParamFeature(DiagnosticsEngine &Diag, SourceLocation CallLoc, const FunctionDecl &Callee, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, StringRef Feature, bool IsArgument)
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static void initFeatureMaps(const ASTContext &Ctx, llvm::StringMap< bool > &CallerMap, const FunctionDecl *Caller, llvm::StringMap< bool > &CalleeMap, const FunctionDecl *Callee)
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, SourceLocation CallLoc, const FunctionDecl &Callee, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, bool IsArgument)
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
static llvm::Type * getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
getFPTypeAtOffset - Return a floating point type at the specified offset.
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM)
static bool isArgInAlloca(const ABIArgInfo &Info)
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
static StringRef getTriple(const Command &Job)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getRequiredAlignment() const
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
Represents a C++ struct/union/class.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits One()
One - Construct a CharUnits quantity of one.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
bool getIndirectByVal() const
static ABIArgInfo getInAlloca(unsigned FieldIndex, bool Indirect=false)
static ABIArgInfo getIgnore()
static ABIArgInfo getExpand()
unsigned getDirectOffset() const
void setIndirectAlign(CharUnits IA)
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ TargetSpecific
TargetSpecific - Some argument types are passed as target specific types such as RISC-V's tuple type,...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
static ABIArgInfo getIndirect(CharUnits Alignment, unsigned AddrSpace, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
bool canHaveCoerceToType() const
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
RecordArgABI
Specify how one should pass an argument of a record type.
@ RAA_Indirect
Pass it as a pointer to temporary memory.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
ABIArgInfo & getReturnInfo()
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
const_arg_iterator arg_begin() const
unsigned getRegParm() const
CanQualType getReturnType() const
bool getHasRegParm() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
unsigned getMaxVectorWidth() const
Return the maximum vector width in the arguments.
unsigned getNumRequiredArgs() const
void setMaxVectorWidth(unsigned Width)
Set the maximum vector width in the arguments.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
ASTContext & getContext() const
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CGFunctionInfo * CurFnInfo
llvm::LLVMContext & getLLVMContext()
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
This class organizes the cross-function state that is used while generating LLVM code.
DiagnosticsEngine & getDiags() const
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::Triple & getTriple() const
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
bool isRequiredArg(unsigned argIdx) const
Return true if the argument at a given index is required.
Target specific hooks for defining how a type should be passed or returned from functions with one of...
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
static std::string qualifyWindowsLibrary(StringRef Lib)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
Decl - This represents one declaration (or definition), e.g.
Concrete class used by the front-end to report problems and issues.
Represents a function declaration or definition.
const ParmVarDecl * getParamDecl(unsigned i) const
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
CallingConv getCallConv() const
A (possibly-)qualified type.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
QualType getCanonicalType() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
specific_decl_iterator< FieldDecl > field_iterator
field_iterator field_begin() const
Encodes a location in the source.
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBlockPointerType() const
bool isFloat16Type() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isPointerType() const
bool isReferenceType() const
bool isEnumeralType() const
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
bool isBitIntType() const
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
bool isBuiltinType() const
Helper methods to distinguish type categories.
bool isAnyComplexType() const
bool isMemberPointerType() const
EnumDecl * getAsEnumDecl() const
Retrieves the EnumDecl this type refers to.
bool isBFloat16Type() const
bool isMemberFunctionPointerType() const
bool isVectorType() const
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
const T * getAs() const
Member-template getAs<specific type>.
bool isRecordType() const
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
Represents a GCC generic vector type.
bool shouldPassIndirectly(CodeGenModule &CGM, ArrayRef< llvm::Type * > types, bool asReturnValue)
Should an aggregate which expands to the given type sequence be passed/returned indirectly under swif...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
std::unique_ptr< TargetCodeGenInfo > createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
std::unique_ptr< TargetCodeGenInfo > createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters)
bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty)
RValue emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, AggValueSlot Slot, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
X86AVXABILevel
The AVX ABI level for X86 targets.
bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyField - Return true iff the field is "empty", that is, it is an unnamed bit-field or an (arra...
llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
bool isAggregateTypeForABI(QualType T)
const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters, bool SoftFloatABI)
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
std::unique_ptr< TargetCodeGenInfo > createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool isSIMDVectorType(ASTContext &Context, QualType Ty)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
@ Address
A pointer to a ValueDecl.
PRESERVE_NONE bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
@ Result
The result type of a method or function.
@ Type
The name was classified as a type.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
U cast(CodeGen::Address addr)
@ Class
The "class" keyword introduces the elaborated-type-specifier.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * Int32Ty