#include "TargetInfo.h"
#include "llvm/ADT/SmallBitVector.h"

bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         IRType->getScalarSizeInBits() != 64;
}
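// Illustrative results of the predicate above (not from the original source):
//   <2 x i32>, <4 x i16>, <8 x i8> -> true   (64 bits total, scalar size != 64)
//   <1 x i64>                      -> false  (scalar size is 64)
//   <4 x i32>                      -> false  (128 bits total)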
static llvm::Type *X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type *Ty) {
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                       .Cases({"y", "&y", "^Ym"}, true)
                       .Default(false);
  if (IsMMXCons && Ty->isVectorTy() &&
      cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=
          64) {
    // Invalid MMX constraint
    return nullptr;
  }

  if (Constraint == "k") {
    llvm::Type *Int1Ty = llvm::Type::getInt1Ty(CGF.getLLVMContext());
    return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
  }

  // No operation needed
  return Ty;
}
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type *T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}
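// Illustrative sizes for the "small enough" check above (not from the source):
//   struct HVA4 { __m128 a, b, c, d; };    // 4 members: qualifies
//   struct HVA5 { __m128 a, b, c, d, e; }; // 5 members: does not qualify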
/// Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(CGFunctionInfo &FI)
      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
        Required(FI.getRequiredArgs()), IsDelegateCall(FI.isDelegateCall()) {}

  llvm::SmallBitVector IsPreassigned;
  unsigned CC = CallingConv::CC_C;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;
  RequiredArgs Required;
  bool IsDelegateCall = false;
};
class X86_32ABIInfo : public ABIInfo {
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State,
                                  unsigned ArgIndex) const;

  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

public:
  void computeInfo(CGFunctionInfo &FI) const override;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
                   CGT.getTarget().getTriple().isOSCygMing()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}
  explicit X86_32SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/false) {}

  bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
                            bool AsReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers.
    return occupiesMoreThan(ComponentTys, /*maxAllRegisters=*/3);
  }
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {
    SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
  }

  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
                                     const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin())
      return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
/// Rewrite input constraint references after adding some output constraints.
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {
        OS << '{';
        ++DigitStart;
      }
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(Buf);
}
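// Illustrative walk-through (not from the source): with FirstIn = 1 and
// NumNewOuts = 1, the reference "$1" in "mov $0, $1" is renumbered to "$2",
// while "$$1" is left alone because an even run of '$' is an escaped literal.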
/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(ReturnSlot.getAddress().withElementType(CoerceTy));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386 the type must be register sized; the MCU ABI only requires it to
  // be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  // 64- and 128-bit vectors inside structures are not returned in registers.
  if (Size == 64 || Size == 128)
    return false;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // A record is returned in a register if every field would be.
  for (const auto *FD : RD->fields()) {
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    if (FD->isBitField())
      return false;
    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, match our old bitcode prototypes for compatibility.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
  }

  // We can expand the argument only if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (State.CC != llvm::CallingConv::X86_FastCall &&
      State.CC != llvm::CallingConv::X86_VectorCall && State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(
      RetTy, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
}
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors need a type the LLVM backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose register,
      // or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), Size));

      return getIndirectReturnResult(RetTy, State);
    }
    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Return complex of _Float16 as <2 x half> so the backend will use xmm0.
    if (const ComplexType *CT = RetTy->getAs<ComplexType>()) {
      QualType ET = getContext().getCanonicalType(CT->getElementType());
      if (ET->isFloat16Type())
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getHalfTy(getVMContext()), 2));
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      // Single-element structs holding a float, double, or pointer are
      // returned in that type's register.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const auto *ED = RetTy->getAsEnumDecl())
    RetTy = ED->getIntegerType();

  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() > 64)
      return getIndirectReturnResult(RetTy, State);
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  if (IsLinuxABI) {
    // If the vector type is __m128/__m256/__m512, return the default
    // alignment.
    if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
      return Align;
  }

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  return MinABIStackAlignInBytes;
}
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4),
                                   getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 getDataLayout().getAllocaAddrSpace(),
                                 /*ByVal=*/true, Realign);
}
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}
bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI does not allow passing >8-byte structs in registers.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}
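// Illustrative accounting (not from the source): a 12-byte struct is 96 bits,
// so SizeInRegs = (96 + 31) / 32 = 3; with State.FreeRegs == 3 it consumes
// every remaining register, and on MCU (IsMCUABI) it is rejected outright
// because SizeInRegs > 2.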
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;
    return false;
  }

  return true;
}
bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
                    (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
                     Ty->isReferenceType());

  if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
                      State.CC == llvm::CallingConv::X86_VectorCall))
    return false;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
    return false;

  return !IsMCUABI;
}
void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI,
                                           CCState &State) const {
  // First pass: assign vector types (not HVAs) to the first six SSE
  // registers; HVAs and everything else are handled in the second pass.
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
  for (int I = 0, E = Args.size(); I < E; ++I) {
    const Type *Base = nullptr;
    uint64_t NumElts = 0;
    const QualType &Ty = Args[I].type;
    if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
        isHomogeneousAggregate(Ty, Base, NumElts)) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        Args[I].info = ABIArgInfo::getDirectInReg();
        State.IsPreassigned.set(I);
      }
    }
  }
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
                                               unsigned ArgIndex) const {
  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;

  TypeInfo TI = getContext().getTypeInfo(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (State.IsDelegateCall) {
      // Avoid having different alignments on delegate call args by always
      // setting the alignment to 4, which is what we do for inallocas.
      ABIArgInfo Res = getIndirectResult(Ty, /*ByVal=*/false, State);
      Res.setIndirectAlign(CharUnits::fromQuantity(4));
      return Res;
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // Regcall and vectorcall use the concept of a homogeneous vector aggregate.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((IsRegCall || IsVectorCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;

      // Vectorcall passes HVAs directly and does not flatten them, but
      // regcall does.
      if (IsVectorCall)
        return getDirectX86Hva();

      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, /*ByVal=*/true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (TI.Width + 31) / 32;
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Pass over-aligned aggregates to non-variadic functions on Windows
    // indirectly; this behavior was added in MSVC 2015.
    if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
      unsigned AlignInBits = 0;
      if (RT) {
        const ASTRecordLayout &Layout =
            getContext().getASTRecordLayout(RT->getDecl());
        AlignInBits = getContext().toBits(Layout.getRequiredAlignment());
      } else if (TI.isAlignRequired()) {
        AlignInBits = TI.Align;
      }
      if (AlignInBits > 32)
        return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // Expand small (<= 128-bit) record types when we know the stack layout of
    // those arguments will match the struct, since the LLVM backend isn't
    // smart enough to remove byval, which inhibits many optimizations.
    if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
        canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          IsFastCall || IsVectorCall || IsRegCall, PaddingType);

    return getIndirectResult(Ty, /*ByVal=*/true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Windows, vectors are passed directly if registers are available, or
    // indirectly if not.
    if (IsWin32StructABI) {
      if (TI.Width <= 512 && State.FreeSSERegs > 0) {
        --State.FreeSSERegs;
        return ABIArgInfo::getDirectInReg();
      }
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // On Darwin, some vectors are passed in memory; handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
          (TI.Width == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), TI.Width));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const auto *ED = Ty->getAsEnumDecl())
    Ty = ED->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (isPromotableIntegerTypeForABI(Ty)) {
    if (InReg)
      return ABIArgInfo::getExtendInReg(Ty);
    return ABIArgInfo::getExtend(Ty);
  }

  if (const auto *EIT = Ty->getAs<BitIntType>()) {
    if (EIT->getNumBits() <= 64) {
      if (InReg)
        return ABIArgInfo::getDirectInReg();
      return ABIArgInfo::getDirect();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI);
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 3;
  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeRegs = 5;
    State.FreeSSERegs = 8;
  } else if (IsWin32StructABI) {
    // Since MSVC 2015, the first three SSE vectors have been passed in
    // registers. The rest are passed indirectly.
    State.FreeRegs = DefaultNumRegisterParameters;
    State.FreeSSERegs = 3;
  } else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }

  // For vectorcall, do a first pass over the arguments, assigning FP and
  // vector arguments to free registers.
  if (State.CC == llvm::CallingConv::X86_VectorCall)
    runVectorCallFirstPass(FI, State);

  bool UsedInAlloca = false;
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    // Skip arguments that have already been assigned.
    if (State.IsPreassigned.test(I))
      continue;

    Args[I].info = classifyArgumentType(Args[I].type, State, I);
    UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and
  // rewrite all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
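// Summary of the register budgets set above (illustrative recap):
//   fastcall:   2 integer (ECX, EDX), 3 SSE
//   vectorcall: 2 integer, 6 SSE
//   regcall:    5 integer, 8 SSE
//   MCU:        3 integer
//   Win32:      DefaultNumRegisterParameters integer, 3 SSE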
void X86_32ABIInfo::addFieldToArgStruct(
    SmallVector<llvm::Type *, 6> &FrameFields, CharUnits &StackOffset,
    ABIArgInfo &Info, QualType Type) const {
  // Arguments are always 4-byte-aligned.
  CharUnits WordSize = CharUnits::fromQuantity(4);
  assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");

  // sret pointers and indirect things will require an extra pointer
  // indirection, unless they are byval.
  bool IsIndirect = false;
  if (Info.isIndirect() && !Info.getIndirectByVal())
    IsIndirect = true;
  Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
  llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
  if (IsIndirect)
    LLTy = llvm::PointerType::getUnqual(getVMContext());
  FrameFields.push_back(LLTy);
  StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);

  // Insert padding bytes to respect alignment.
  CharUnits FieldEnd = StackOffset;
  StackOffset = FieldEnd.alignTo(WordSize);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  }
}
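// Illustrative layout (not from the source): pushing (int, char, double)
// through the routine above yields the packed frame
// { i32, i8, [3 x i8], double }, where [3 x i8] is the padding inserted so
// that every field starts 4-byte aligned.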
static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Ignore:
  case ABIArgInfo::IndirectAliased:
    return false;
  case ABIArgInfo::Indirect:
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
    return !Info.getInReg();
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
    // These are aggregate types which are never passed in registers when
    // inalloca is involved.
    return true;
  }
  llvm_unreachable("invalid enum");
}
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  // The stack alignment is always 4.
  CharUnits StackAlign = CharUnits::fromQuantity(4);

  CharUnits StackOffset;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true),
                  StackAlign);
}
RValue X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  CCState State(*const_cast<CGFunctionInfo *>(CGF.CurFnInfo));

  // x86-32 changes the alignment of certain arguments on the stack.
  TypeInfo.Align = CharUnits::fromQuantity(
      getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
                                 CodeGen::CodeGenModule &CGM) {
  if (!FD->hasAttr<AnyX86InterruptAttr>())
    return;

  llvm::Function *Fn = cast<llvm::Function>(GV);
  Fn->setCallingConv(llvm::CallingConv::X86_INTR);
  if (FD->getNumParams() == 0)
    return;

  QualType PtrTy = FD->getParamDecl(0)->getType();
  llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
  llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
      Fn->getContext(), ByValTy);
  Fn->addParamAttr(0, NewAttr);
}
void X86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }

    addX86InterruptAttrs(FD, GV, CGM);
  }
}
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4); these have size 16 on platforms with 8-byte
    // alignment for long double.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags.
    Builder.CreateAlignedStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
        CharUnits::One());

    // 11-16 are st(0..5); these have size 12 on platforms with 4-byte
    // alignment for long double.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg, bool IsRegCall = false) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                  unsigned &neededInt, unsigned &neededSSE,
                                  bool isNamedArg,
                                  bool IsRegCall = false) const;

  ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                       unsigned &NeededSSE,
                                       unsigned &MaxVectorWidth) const;

  ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                           unsigned &NeededSSE,
                                           unsigned &MaxVectorWidth) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities, unfortunately in
  /// ways that were not always consistent with certain previous compilers.
  /// In particular, platforms which required strict binary compatibility with
  /// older versions of GCC may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  /// GCC classifies <1 x long long> as SSE, but some platform ABIs choose to
  /// classify it as INTEGER (for compatibility with older clang compilers).
  bool classifyIntegerMMXAsSSE() const {
    // Clang <= 3.8 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver3_8)
      return false;

    const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
      return false;
    return true;
  }

  // GCC classifies vectors of __int128 as memory.
  bool passInt128VectorsInMem() const {
    // Clang <= 9.0 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver9)
      return false;

    const llvm::Triple &T = getTarget().getTriple();
    return T.isOSLinux() || T.isOSNetBSD();
  }

  bool returnCXXRecordGreaterThan128InMem() const {
    // Clang <= 20.0 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
            LangOptions::ClangABI::Ver20 ||
        getTarget().getTriple().isPS())
      return false;
    return true;
  }

  X86AVXABILevel AVXLevel;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}
  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg=*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
  RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                     AggValueSlot Slot) const override;

  bool has64BitPointers() const { return Has64BitPointers; }
};
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {
public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  ABIArgInfo classifyArgForArm64ECVarArg(QualType Ty) const override {
    unsigned FreeSSERegs = 0;
    return classify(Ty, FreeSSERegs, /*IsReturnType=*/false,
                    /*IsVectorCall=*/false, /*IsRegCall=*/false);
  }

private:
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
                      bool IsVectorCall, bool IsRegCall) const;
  ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
                                           const ABIArgInfo &current) const;

  X86AVXABILevel AVXLevel;

  bool IsMingw64;
};
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  /// Disable tail call on x86-64; the epilogue code before the tail jump
  /// blocks autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV
  /// optimizations.
  bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers; 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE registers used,
    // and GCC sets this when calling an unprototyped function, so we override
    // the default behavior. However, don't do that when AVX types are
    // involved: the ABI explicitly states it is undefined, and it doesn't
    // work in practice because of how the ABI defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (const CallArg &arg : args) {
        if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(arg.Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->addFnAttr("stackrealign");
      }

      addX86InterruptAttrs(FD, GV, CGM);
    }
  }

  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee, const CallArgList &Args,
                            QualType ReturnType) const override;
};
static void initFeatureMaps(const ASTContext &Ctx,
                            llvm::StringMap<bool> &CallerMap,
                            const FunctionDecl *Caller,
                            llvm::StringMap<bool> &CalleeMap,
                            const FunctionDecl *Callee) {
  if (CalleeMap.empty() && CallerMap.empty()) {
    // The caller is potentially nullptr in the case where the call isn't in a
    // function. In this case, getFunctionFeatureMap ensures we just get the
    // TU-level setting (since it cannot be modified by 'target').
    Ctx.getFunctionFeatureMap(CallerMap, Caller);
    if (Callee)
      Ctx.getFunctionFeatureMap(CalleeMap, Callee);
  }
}

static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
                                 SourceLocation CallLoc,
                                 const FunctionDecl &Callee,
                                 const llvm::StringMap<bool> &CallerMap,
                                 const llvm::StringMap<bool> &CalleeMap,
                                 QualType Ty, StringRef Feature,
                                 bool IsArgument) {
  bool CallerHasFeat = CallerMap.lookup(Feature);
  bool CalleeHasFeat = CalleeMap.lookup(Feature);
  if (!CallerHasFeat && !CalleeHasFeat &&
      (!Callee.isExternallyVisible() || Callee.hasAttr<AlwaysInlineAttr>()))
    return false;

  // Mixing calling conventions here is very clearly an error.
  if (!CallerHasFeat && !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
           << IsArgument << Ty << Feature;

  // One of the caller/callee has the feature and the other doesn't: an
  // obvious ABI mismatch.
  if (!CallerHasFeat || !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::err_avx_calling_convention)
           << IsArgument << Ty << Feature;

  return false;
}

static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
                          SourceLocation CallLoc, const FunctionDecl &Callee,
                          const llvm::StringMap<bool> &CallerMap,
                          const llvm::StringMap<bool> &CalleeMap, QualType Ty,
                          bool IsArgument) {
  uint64_t Size = Ctx.getTypeSize(Ty);
  if (Size > 256)
    return checkAVXParamFeature(Diag, CallLoc, Callee, CallerMap, CalleeMap,
                                Ty, "avx512f", IsArgument);
  if (Size > 128)
    return checkAVXParamFeature(Diag, CallLoc, Callee, CallerMap, CalleeMap,
                                Ty, "avx", IsArgument);
  return false;
}
void X86_64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
                                                   SourceLocation CallLoc,
                                                   const FunctionDecl *Caller,
                                                   const FunctionDecl *Callee,
                                                   const CallArgList &Args,
                                                   QualType ReturnType) const {
  if (!Callee)
    return;

  llvm::StringMap<bool> CallerMap;
  llvm::StringMap<bool> CalleeMap;
  unsigned ArgIndex = 0;

  // Loop through the actual call arguments rather than the function's
  // parameters, in case this is variadic.
  for (const CallArg &Arg : Args) {
    // The "avx" feature changes how vectors >128 bits are passed, and
    // "avx512f" additionally changes how vectors >256 bits are passed. Like
    // GCC, warn when this would change behavior; unlike GCC, also error on an
    // obvious ABI mismatch.
    if (Arg.getType()->isVectorType() &&
        CGM.getContext().getTypeSize(Arg.getType()) > 128) {
      initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
      QualType Ty = Arg.getType();
      // The CallArg seems to have desugared the type already, so for clearer
      // diagnostics, replace it with the type in the FunctionDecl if possible.
      if (ArgIndex < Callee->getNumParams())
        Ty = Callee->getParamDecl(ArgIndex)->getType();

      if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, *Callee,
                        CallerMap, CalleeMap, Ty, /*IsArgument=*/true))
        return;
    }
    ++ArgIndex;
  }

  // Check the return always, as we don't have a good way of knowing in
  // codegen whether this value is used, tail-called, etc.
  if (Callee->getReturnType()->isVectorType() &&
      CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
    initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
    checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, *Callee,
                  CallerMap, CalleeMap, Callee->getReturnType(),
                  /*IsArgument=*/false);
  }
}
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument contains a space, enclose it in quotes.
  bool Quote = Lib.contains(' ');
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.ends_with_insensitive(".lib") && !Lib.ends_with_insensitive(".a"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                             bool RetSmallStructInRegABI, bool Win32StructABI,
                             unsigned NumRegisterParameters)
      : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
                                Win32StructABI, NumRegisterParameters,
                                /*SoftFloatABI=*/false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WinX86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers; 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WinX86_64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }

    addX86InterruptAttrs(FD, GV, CGM);
  }

  addStackProbeTargetAttributes(D, GV, CGM);
}
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done.
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is classified
  // recursively so that always two fields are considered.
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
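// Illustrative merges (not from the source), following the rules above:
//   merge(NoClass, Integer) -> Integer
//   merge(Integer, SSE)     -> Integer
//   merge(SSE, SSE)         -> SSE
//   merge(anything, Memory) -> Memory
//   merge(SSE, X87)         -> Memory   (x87 never shares an eightbyte)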
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
                             Class &Hi, bool isNamedArg,
                             bool IsRegCall) const {
  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
               k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
      Current = SSE;
    } else if (k == BuiltinType::Float128) {
      Lo = SSE;
      Hi = SSEUp;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad()) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
    return;
  }

  if (const auto *ED = Ty->getAsEnumDecl()) {
    // Classify the underlying integer type.
    classify(ED->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType()) {
      if (Has64BitPointers) {
        // If Has64BitPointers, this is an {i64, i64}; classify both Lo and Hi.
        Lo = Hi = Integer;
      } else {
        // With 32-bit pointers, this is an {i32, i32}; if it straddles an
        // eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj) {
          Lo = Hi = Integer;
        } else {
          Current = Integer;
        }
      }
    } else {
      Current = Integer;
    }
    return;
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
      // gcc passes small (<= 32-bit) vectors as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be split.
      uint64_t EB_Lo = (OffsetBase) / 64;
      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
      if (EB_Lo != EB_Hi)
        Hi = Lo;
    } else if (Size == 64) {
      QualType ElementType = VT->getElementType();

      // gcc passes <1 x long long> as SSE but clang used to unconditionally
      // pass it as integer; keep integer where clang is the de facto
      // platform compiler.
      if (!classifyIntegerMMXAsSSE() &&
          (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULongLong)))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      QualType ElementType = VT->getElementType();

      // gcc passes 256- and 512-bit <X x __int128> vectors in memory.
      if (passInt128VectorsInMem() && Size != 128 &&
          (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
           ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
        return;

      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||
               ET->isBFloat16Type()) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad())
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble())
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }

    // If this complex type crosses an eightbyte boundary then it should be
    // split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const auto *EITy = Ty->getAs<BitIntType>()) {
    if (EITy->getNumBits() <= 64)
      Current = Integer;
    else if (EITy->getNumBits() <= 128)
      Lo = Hi = Integer;
    // Larger values need to get passed in memory.
    return;
  }
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. Objects larger than eight eightbytes have
    // class MEMORY; regcall has no such limit, only the free registers,
    // which are checked in computeInfo.
    if (!IsRegCall && Size > 512)
      return;

    // Unaligned fields also force class MEMORY; only the array base's
    // alignment needs checking.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement a simplified merge.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getZExtSize();

    // A 256-bit or wider vector is only usable when the array holds exactly
    // one such element; otherwise fall back to memory.
    if (EltSize > 128 &&
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
      return;

    for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
         ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // Objects larger than eight eightbytes have class MEMORY, as do C++
    // objects with a non-trivial copy constructor or destructor (passed by
    // invisible reference).
    if (Size > 512 || getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getDecl()->getDefinitionOrSelf();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base = I.getType()->castAsCXXRecordDecl();

        Class FieldLo, FieldHi;
        uint64_t Offset =
            OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (returnCXXRecordGreaterThan128InMem() &&
            (Size > 128 && (Size != getContext().getTypeSize(I.getType()) ||
                            Size > getNativeVectorSizeForAVXABI(AVXLevel)))) {
          Lo = Memory;
        }
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
                                LangOptions::ClangABI::Ver11 ||
                            getContext().getTargetInfo().getTriple().isPS();
    bool IsUnion = RT->isUnionType() && !UseClang11Compat;

    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // Ignore padding bit-fields.
      if (BitField && i->isUnnamedBitField())
        continue;

      // A 256-bit or wider vector is only usable when the struct holds
      // exactly one such element; otherwise fall back to memory.
      if (Size > 128 &&
          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      bool IsInMemory =
          Offset % getContext().getTypeAlign(i->getType().getCanonicalType());
      // Note, skip this test for bit-fields, see below.
      if (!BitField && IsInMemory) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      Class FieldLo, FieldHi;

      // Bit-fields do not force the structure into memory even if unaligned,
      // and therefore they can straddle an eightbyte.
      if (BitField) {
        assert(!i->isUnnamedBitField());
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the
  // right place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const auto *ED = Ty->getAsEnumDecl())
      Ty = ED->getIntegerType();

    if (Ty->isBitIntType())
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());
}
bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&
        (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
         EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
      return true;
  }

  return false;
}
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // Treat an enum type as its underlying type.
  if (const auto *ED = Ty->getAsEnumDecl())
    Ty = ED->getIntegerType();

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align),
                                 getDataLayout().getAllocaAddrSpace());
}
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType)) {
    // Don't pass vXi128 vectors in their native type, the backend can't
    // legalize them; use a vXi64 vector instead.
    if (passInt128VectorsInMem() &&
        cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
      uint64_t Size = getContext().getTypeSize(Ty);
      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
                                        Size / 64);
    }
    return IRType;
  }

  if (IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  // Return an LLVM IR vector type based on the size of 'Ty'.
  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                                    Size / 64);
}
/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to be either off the end of the type or padding.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getZExtSize();

    // Check each element to see if the element overlaps with the queried
    // range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i * EltSize;
      if (EltOffset >= EndBit)
        break;

      unsigned EltStart = EltOffset < StartBit ? StartBit - EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit - EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base = I.getType()->castAsCXXRecordDecl();

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit)
          continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit - BaseOffset : 0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit - BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit)
        break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit - FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit - FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, we're clean.
    return true;
  }

  return false;
}
/// getFPTypeAtOffset - Return a floating point type at the specified offset.
static llvm::Type *getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                                     const llvm::DataLayout &TD) {
  if (IROffset == 0 && IRType->isFloatingPointTy())
    return IRType;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    if (!STy->getNumContainedTypes())
      return nullptr;

    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return getFPTypeAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset / EltSize * EltSize;
    return getFPTypeAtOffset(EltTy, IROffset, TD);
  }

  return nullptr;
}
/// GetSSETypeAtOffset - Return a type that will be passed by the backend in
/// the low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  const llvm::DataLayout &TD = getDataLayout();
  unsigned SourceSize =
      (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
  llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
  if (!T0 || T0->isDoubleTy())
    return llvm::Type::getDoubleTy(getVMContext());

  // Get the adjacent FP type.
  llvm::Type *T1 = nullptr;
  unsigned T0Size = TD.getTypeAllocSize(T0);
  if (SourceSize > T0Size)
    T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
  if (T1 == nullptr) {
    // Check if IRType is a half/bfloat + float; the float will be at
    // IROffset+4 due to its alignment.
    if (T0->is16bitFPTy() && SourceSize > 4)
      T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    // If we can't get a second FP type, return a simple half or float.
    if (T1 == nullptr)
      return T0;
  }

  if (T0->isFloatTy() && T1->isFloatTy())
    return llvm::FixedVectorType::get(T0, 2);

  if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
    llvm::Type *T2 = nullptr;
    if (SourceSize > 4)
      T2 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    if (T2 == nullptr)
      return llvm::FixedVectorType::get(T0, 2);
    return llvm::FixedVectorType::get(T0, 4);
  }

  if (T0->is16bitFPTy() || T1->is16bitFPTy())
    return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);

  return llvm::Type::getDoubleTy(getVMContext());
}
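// Illustrative results (not from the source): struct { float a, b; } yields
// <2 x float>; struct { _Float16 a, b, c; } yields a 4-element half vector;
// a lone double, or anything without a recognizable FP layout, falls back
// to double.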
/// GetINTEGERTypeAtOffset - Return a type that will be passed by the backend
/// in an integer register, corresponding to the INTEGER class.
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType)
                              ? 32
                              : cast<llvm::IntegerType>(IRType)->getBitWidth();
      if (BitsContainNoUserData(SourceTy, SourceOffset * 8 + BitWidth,
                                SourceOffset * 8 + 64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);
      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset / EltSize * EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset - EltOffset, SourceTy,
                                  SourceOffset);
  }

  // An i128 covers both eightbytes by itself.
  if (IRType->isIntegerTy(128)) {
    assert(IROffset == 0);
    return IRType;
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in
  // an integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes - SourceOffset, 8U) * 8);
}
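// Illustrative results (not from the source): for struct { char c; short s; }
// the eightbyte is returned as i32 (the 4-byte struct size rounded into one
// register, since only tail padding follows); for struct { void *p; } with
// 64-bit pointers the pointer type is used directly.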
/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally be
/// used as elements of a two-element struct, return the struct.
static llvm::Type *GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                                              const llvm::DataLayout &TD) {
  // To correctly satisfy the ABI, the high part must start at offset 8. If
  // the inferred high and low parts are both 4-byte types (e.g. i32 and i32)
  // then the resultant struct type ({i32,i32}) won't have the second element
  // at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  llvm::Align HiAlign = TD.getABITypeAlign(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // Handle this by increasing the size of the low part so that the second
  // element starts at an 8-byte offset.
  if (HiStart != 8) {
    // Promote half/float to double, and small integers or 32-bit pointers
    // to i64.
    if (Lo->isHalfTy() || Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg=*/true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const auto *ED = RetTy->getAsEnumDecl())
        RetTy = ED->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(RetTy))
        return ABIArgInfo::getExtend(RetTy);
    }

    // An i128 covers both eightbytes by itself; don't merge a high part.
    if (ResType->isIntegerTy(128)) {
      return ABIArgInfo::getDirect(ResType);
    }
    break;

  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real part is
  // returned in %st0 and the imaginary part in %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously and X87 should never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // SSEUp should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

  // If X87Up is preceded by X87, we don't need to do anything. However, in
  // some cases with unions it may not be, in which case we follow it with an
  // SSE entry.
  case X87Up:
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result; form a first-class
  // struct aggregate with the high and low part: {low, high}.
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
ABIArgInfo
X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                    unsigned &neededInt, unsigned &neededSSE,
                                    bool isNamedArg, bool IsRegCall) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  // AMD64-ABI 3.2.3p3: Rule 1 and Rule 5. MEMORY, X87, X87UP and
  // COMPLEX_X87 are passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p3: Rule 2. INTEGER uses the next available register of
  // the sequence %rdi, %rsi, %rdx, %rcx, %r8 and %r9.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const auto *ED = Ty->getAsEnumDecl())
        Ty = ED->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(Ty))
        return ABIArgInfo::getExtend(Ty);
    }

    // An i128 covers both eightbytes by itself and needs both registers.
    if (ResType->isIntegerTy(128)) {
      ++neededInt;
      return ABIArgInfo::getDirect(ResType);
    }
    break;

  // AMD64-ABI 3.2.3p3: Rule 3. SSE uses the next available SSE register,
  // taken in order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should never occur as
  // hi classes, and X87Up must be preceded by X87, which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass:
    break;

  case Integer:
    ++neededInt;
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // X87Up generally doesn't occur here (long double is passed in memory),
  // except in situations involving unions.
  case X87Up:
  case SSE:
    ++neededSSE;
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // AMD64-ABI 3.2.3p3: Rule 4. SSEUP is passed in the upper half of the last
  // used SSE register; this only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part; it is
  // known to pass in the high eightbyte of the result.
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE,
                                             unsigned &MaxVectorWidth) const {
  auto RT = Ty->getAs<RecordType>();
  assert(RT && "classifyRegCallStructType only valid with struct types");

  if (RT->getDecl()->hasFlexibleArrayMember())
    return getIndirectReturnResult(Ty);

  // Sum up bases.
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
                                        MaxVectorWidth)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
  }

  // Sum up members.
  for (const auto *FD : RT->getDecl()->fields()) {
    QualType MTy = FD->getType();
    if (MTy->isRecordType() && !MTy->isUnionType()) {
      if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
                                        MaxVectorWidth)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
    } else {
      unsigned LocalNeededInt, LocalNeededSSE;
      if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
                               /*isNamedArg=*/true, /*IsRegCall=*/true)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
      if (const auto *AT = getContext().getAsConstantArrayType(MTy))
        MTy = AT->getElementType();
      if (const auto *VT = MTy->getAs<VectorType>())
        if (getContext().getTypeSize(VT) > MaxVectorWidth)
          MaxVectorWidth = getContext().getTypeSize(VT);
      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;
    }
  }

  return ABIArgInfo::getDirect();
}
ABIArgInfo
X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                         unsigned &NeededSSE,
                                         unsigned &MaxVectorWidth) const {
  NeededInt = 0;
  NeededSSE = 0;
  MaxVectorWidth = 0;

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
                                       MaxVectorWidth);
}
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CallingConv = FI.getCallingConvention();
  // It is possible to force Win64 calling convention on any x86_64 target by
  // using __attribute__((ms_abi)); in that case delegate to WinX86_64ABIInfo.
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);
    return;
  }

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  // Keep track of the number of assigned registers.
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
        !FI.getReturnType()->getTypePtr()->isUnionType()) {
      FI.getReturnInfo() = classifyRegCallStructType(
          FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
      if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
        FreeIntRegs -= NeededInt;
        FreeSSERegs -= NeededSSE;
      } else {
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
      }
    } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
               getContext().getCanonicalType(FI.getReturnType()
                                                 ->getAs<ComplexType>()
                                                 ->getElementType()) ==
                   getContext().LongDoubleTy)
      // Complex long double is passed in memory under regcall.
      FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
    else
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  }

  // If the return value is indirect, then the hidden argument consumes one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --FreeIntRegs;
  else if (NeededSSE && MaxVectorWidth > 0)
    FI.setMaxVectorWidth(MaxVectorWidth);

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++FreeIntRegs;

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
                                           MaxVectorWidth);
    else
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
                                      NeededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the stack;
    // any assignments already made are reverted.
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
      if (MaxVectorWidth > FI.getMaxVectorWidth())
        FI.setMaxVectorWidth(MaxVectorWidth);
    } else {
      it->info = getIndirectResult(it->type, FreeIntRegs);
    }
  }
}
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  Address overflow_arg_area_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a
  // 16-byte boundary if the alignment needed by the type exceeds 8 bytes.
  llvm::Value *Res = overflow_arg_area;

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
                                            Offset, "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Address(Res, LTy, Align);
}
RValue X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg=*/false);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed in the
  // registers. If not, go to step 7 (memory).
  if (!neededInt && !neededSSE)
    return CGF.EmitLoadOfAnyValue(
        CGF.MakeAddrLValue(EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty), Ty),
        Slot);

  // AMD64-ABI 3.5.7p5: Steps 2-3. Verify whether the argument fits into
  // the remaining registers (there are 48 bytes of GPR save area and 176
  // bytes of register save area in total).
  llvm::Value *InRegs = nullptr;
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
  if (neededInt) {
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    Address Tmp = CGF.CreateMemTemp(Ty);
    Tmp = Tmp.withElementType(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Value *GPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
    llvm::Value *FPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
    // Copy both halves into the temporary and use it as the register address.
    RegAddr = Tmp.withElementType(LTy);
  } else if (neededInt || neededSSE == 1) {
    // Copy to a temporary if necessary to ensure the appropriate alignment.
    auto TInfo = getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = TInfo.Width.getQuantity();
    CharUnits TyAlign = TInfo.Align;
    llvm::Type *CoTy = nullptr;
    if (AI.isDirect())
      CoTy = AI.getCoerceToType();

    llvm::Value *GpOrFpOffset = neededInt ? gp_offset : fp_offset;
    uint64_t Alignment = neededInt ? 8 : 16;
    uint64_t RegSize = neededInt ? neededInt * 8 : 16;
    if (CoTy && (AI.getDirectOffset() == 8 || RegSize < TySize)) {
      Address Tmp = CGF.CreateMemTemp(Ty);
      llvm::Value *Addr =
          CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, GpOrFpOffset);
      llvm::Value *Src = CGF.Builder.CreateAlignedLoad(CoTy, Addr, TyAlign);
      llvm::Value *PtrOffset =
          llvm::ConstantInt::get(CGF.Int32Ty, AI.getDirectOffset());
      Address Dst = Address(
          CGF.Builder.CreateGEP(CGF.Int8Ty, Tmp.getBasePointer(), PtrOffset),
          LTy, TyAlign);
      CGF.Builder.CreateStore(Src, Dst);
      RegAddr = Tmp;
    } else {
      RegAddr =
          Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, GpOrFpOffset),
                  LTy, CharUnits::fromQuantity(Alignment));
    }
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save area; load
    // the low and high parts and combine them in a temporary.
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }

  // Emit code to load the value if it was passed in memory, then merge.
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
                                 "vaarg.addr");
  return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(ResAddr, Ty), Slot);
}
RValue X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty, AggValueSlot Slot) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign=*/false, Slot);
}
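// Illustrative consequences of the rule above (not from the source): an
// 8-byte struct is read directly from the va_list, while a 16-byte struct
// (too wide) or a 3-byte struct (size not a power of two) is passed by
// reference and loaded through the pointer.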
ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
    QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
  const Type *Base = nullptr;
  uint64_t NumElts = 0;

  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();
  }
  return current;
}
ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {
  // Treat an enum type as its underlying type.
  if (const auto *ED = Ty->getAsEnumDecl())
    Ty = ED->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                       RAA == CGCXXABI::RAA_DirectInMemory);
    }

    if (RT->getDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                     /*ByVal=*/false);
  }

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets.
  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (IsRegCall) {
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
        if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
          return ABIArgInfo::getDirect();
        return ABIArgInfo::getExpand();
      }
      return ABIArgInfo::getIndirect(
          Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
          (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
        FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (IsReturnType) {
        return ABIArgInfo::getExpand();
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
        // HVAs are delayed and reclassified in the 2nd step.
        return ABIArgInfo::getIndirect(
            Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
      }
    }
  }

  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or
    // is not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                     /*ByVal=*/false);

    // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    case BuiltinType::Bool:
      // Bool is always extended to the ABI; other builtin types are not.
      return ABIArgInfo::getExtend(Ty);

    case BuiltinType::LongDouble: {
      // Mingw64 GCC uses the old 80-bit extended precision floating point
      // unit; it passes them indirectly through memory.
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (IsMingw64 && LDF == &llvm::APFloat::x87DoubleExtended())
        return ABIArgInfo::getIndirect(
            Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
      break;
    }

    case BuiltinType::Int128:
    case BuiltinType::UInt128:
    case BuiltinType::Float128:
      // 128-bit values are passed indirectly as arguments. Mingw64 GCC
      // returns i128 in XMM0; coerce to v2i64 to match for compatibility.
      if (!IsReturnType)
        return ABIArgInfo::getIndirect(
            Align, getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false);
      return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2));

    default:
      break;
    }
  }

  if (Ty->isBitIntType()) {
    // Non-power-of-two bit-precise integers are passed as 1, 2, 4, or 8
    // bytes as long as they fit, so only the width needs checking.
    if (Width <= 64)
      return ABIArgInfo::getDirect();
    return ABIArgInfo::getIndirect(Align, getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/false);
  }

  return ABIArgInfo::getDirect();
}
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CC = FI.getCallingConvention();
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  // If __attribute__((sysv_abi)) is in use, use the SysV argument
  // classification rules.
  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);
    return;
  }

  unsigned FreeSSERegs = 0;
  if (IsVectorCall) {
    // We can use up to 4 SSE return registers with vectorcall.
    FreeSSERegs = 4;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers.
    FreeSSERegs = 16;
  }

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs,
                                  /*IsReturnType=*/true, IsVectorCall,
                                  IsRegCall);

  if (IsVectorCall) {
    // We can use up to 6 SSE register parameters with vectorcall.
    FreeSSERegs = 6;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers.
    FreeSSERegs = 16;
  }

  unsigned ArgNum = 0;
  unsigned ZeroSSERegs = 0;
  for (auto &I : FI.arguments()) {
    // Vectorcall in x64 only permits the first 6 arguments to be passed as
    // XMM/YMM registers; after the sixth argument, pretend no vector
    // registers are left.
    unsigned *MaybeFreeSSERegs =
        (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
    I.info =
        classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
    ++ArgNum;
  }

  if (IsVectorCall) {
    // For vectorcall, assign aggregate HVAs to any free vector registers in
    // a second pass.
    for (auto &I : FI.arguments())
      I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
  }
}
RValue WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign=*/false, Slot);
}
std::unique_ptr<TargetCodeGenInfo> CodeGen::createX86_32TargetCodeGenInfo(
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters, bool SoftFloatABI) {
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
      CGM.getTriple(), CGM.getCodeGenOpts());
  return std::make_unique<X86_32TargetCodeGenInfo>(
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters, SoftFloatABI);
}

std::unique_ptr<TargetCodeGenInfo> CodeGen::createWinX86_32TargetCodeGenInfo(
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters) {
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
      CGM.getTriple(), CGM.getCodeGenOpts());
  return std::make_unique<WinX86_32TargetCodeGenInfo>(
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createX86_64TargetCodeGenInfo(CodeGenModule &CGM,
                                       X86AVXABILevel AVXLevel) {
  return std::make_unique<X86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM,
                                          X86AVXABILevel AVXLevel) {
  return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
}
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
static bool checkAVXParamFeature(DiagnosticsEngine &Diag, SourceLocation CallLoc, const FunctionDecl &Callee, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, StringRef Feature, bool IsArgument)
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static void initFeatureMaps(const ASTContext &Ctx, llvm::StringMap< bool > &CallerMap, const FunctionDecl *Caller, llvm::StringMap< bool > &CalleeMap, const FunctionDecl *Callee)
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, SourceLocation CallLoc, const FunctionDecl &Callee, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, bool IsArgument)
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
static llvm::Type * getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
getFPTypeAtOffset - Return a floating point type at the specified offset.
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM)
static bool isArgInAlloca(const ABIArgInfo &Info)
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getRequiredAlignment() const
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
Represents a C++ struct/union/class.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits One()
One - Construct a CharUnits quantity of one.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
bool getIndirectByVal() const
static ABIArgInfo getInAlloca(unsigned FieldIndex, bool Indirect=false)
static ABIArgInfo getIgnore()
static ABIArgInfo getExpand()
unsigned getDirectOffset() const
void setIndirectAlign(CharUnits IA)
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ TargetSpecific
TargetSpecific - Some argument types are passed as target specific types such as RISC-V's tuple type,...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
static ABIArgInfo getIndirect(CharUnits Alignment, unsigned AddrSpace, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
bool canHaveCoerceToType() const
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
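A hedged sketch of how a classifier combines these factories; the real X86 classifiers have many more cases (register accounting, vectors, MCU rules, and so on), and classifySketch is a hypothetical name:

// Sketch only: not the actual X86 classification logic.
ABIArgInfo classifySketch(QualType Ty, ASTContext &Ctx) {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();
  if (Ty->isPromotableIntegerType())   // e.g. 'short' is widened to 'int'
    return ABIArgInfo::getExtend(Ty);
  if (isAggregateTypeForABI(Ty))       // structs go to memory in this sketch
    return ABIArgInfo::getIndirect(Ctx.getTypeAlignInChars(Ty),
                                   /*AddrSpace=*/0, /*ByVal=*/true);
  return ABIArgInfo::getDirect();      // scalars in their natural IR type
}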
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
Address - Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
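These builder helpers differ from raw IRBuilder calls in that they take and return Address, so alignment is tracked automatically. A sketch, where Src and Dest are hypothetical Address values already in scope:

Address Field = CGF.Builder.CreateStructGEP(Src, /*Index=*/1, "src.f1");
llvm::Value *V = CGF.Builder.CreateLoad(Field, "tmp");
CGF.Builder.CreateStore(V, Dest);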
RecordArgABI
Specify how one should pass an argument of a record type.
@ RAA_Indirect
Pass it as a pointer to temporary memory.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
ABIArgInfo & getReturnInfo()
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
const_arg_iterator arg_begin() const
unsigned getRegParm() const
CanQualType getReturnType() const
bool getHasRegParm() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
unsigned getMaxVectorWidth() const
Return the maximum vector width in the arguments.
unsigned getNumRequiredArgs() const
void setMaxVectorWidth(unsigned Width)
Set the maximum vector width in the arguments.
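Every ABIInfo::computeInfo override follows the same shape over this interface: let the C++ ABI claim the return value, then classify each argument in order. A sketch; classifyMyReturnType and classifyMyArgumentType are hypothetical helpers, and the real X86 overrides additionally thread a CCState through:

void SomeABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // The C++ ABI may force an indirect return (e.g. non-trivial types).
  if (!CodeGen::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() = classifyMyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyMyArgumentType(Arg.type);
}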
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
ASTContext & getContext() const
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CGFunctionInfo * CurFnInfo
llvm::LLVMContext & getLLVMContext()
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
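Together with the emitMergePHI helper listed further below, these primitives express the classic two-path va_arg emission. A sketch, where InRegs, RegAddr, and MemAddr are placeholders computed elsewhere:

llvm::BasicBlock *InRegBB = CGF.createBasicBlock("vaarg.in_reg");
llvm::BasicBlock *InMemBB = CGF.createBasicBlock("vaarg.in_mem");
llvm::BasicBlock *ContBB  = CGF.createBasicBlock("vaarg.end");
CGF.Builder.CreateCondBr(InRegs, InRegBB, InMemBB);

CGF.EmitBlock(InRegBB);
// ... materialize RegAddr from the register save area ...
CGF.EmitBranch(ContBB);

CGF.EmitBlock(InMemBB);
// ... materialize MemAddr from the overflow area ...
CGF.EmitBranch(ContBB);

CGF.EmitBlock(ContBB);
Address ResultAddr =
    emitMergePHI(CGF, RegAddr, InRegBB, MemAddr, InMemBB, "vaarg.addr");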
CodeGenModule - This class organizes the cross-function state that is used while generating LLVM code...
DiagnosticsEngine & getDiags() const
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::Triple & getTriple() const
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
CodeGenTypes - This class organizes the cross-module state that is used while lowering AST types to LLVM types.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
bool isRequiredArg(unsigned argIdx) const
Return true if the argument at a given index is required.
SwiftABIInfo - Target specific hooks for defining how a type should be passed or returned from functions with one of...
TargetCodeGenInfo - This class organizes various target-specific code-generation issues,...
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
static std::string qualifyWindowsLibrary(StringRef Lib)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
ComplexType - Complex values, per C99 6.2.5p11.
ConstantArrayType - Represents the canonical version of C arrays with a specified constant size.
Decl - This represents one declaration (or definition), e.g.
DiagnosticsEngine - Concrete class used by the front-end to report problems and issues.
FunctionDecl - Represents a function declaration or definition.
const ParmVarDecl * getParamDecl(unsigned i) const
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
CallingConv getCallConv() const
QualType - A (possibly-)qualified type.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
QualType getCanonicalType() const
RecordDecl - Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
specific_decl_iterator< FieldDecl > field_iterator
field_iterator field_begin() const
SourceLocation - Encodes a location in the source.
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBlockPointerType() const
bool isFloat16Type() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isPointerType() const
bool isReferenceType() const
bool isEnumeralType() const
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
bool isBitIntType() const
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
bool isBuiltinType() const
Helper methods to distinguish type categories.
bool isAnyComplexType() const
bool isMemberPointerType() const
EnumDecl * getAsEnumDecl() const
Retrieves the EnumDecl this type refers to.
bool isBFloat16Type() const
bool isMemberFunctionPointerType() const
bool isVectorType() const
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
const T * getAs() const
Member-template getAs<specific type>.
bool isRecordType() const
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
VectorType - Represents a GCC generic vector type.
bool shouldPassIndirectly(CodeGenModule &CGM, ArrayRef< llvm::Type * > types, bool asReturnValue)
Should an aggregate which expands to the given type sequence be passed/returned indirectly under swif...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
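The usual idiom is sketched below: RAA_Default is zero, so the condition fires only when the C++ ABI forces a nontrivial convention (note that getNaturalAlignIndirect's exact signature varies across Clang versions):

if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
  return getNaturalAlignIndirect(Ty, /*AddrSpace=*/0,
                                 /*ByVal=*/RAA == CGCXXABI::RAA_DirectInMemory);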
std::unique_ptr< TargetCodeGenInfo > createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
std::unique_ptr< TargetCodeGenInfo > createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters)
bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty)
RValue emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, AggValueSlot Slot, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
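A sketch of a target EmitVAArg delegating to this helper, using the i386 4-byte stack slot as the example; EmitVAArgSketch is a hypothetical name, and the real X86_32 override additionally adjusts alignment per platform:

RValue EmitVAArgSketch(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       AggValueSlot Slot, ASTContext &Ctx) {
  TypeInfoChars TypeInfo = Ctx.getTypeInfoInChars(Ty);
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
                          TypeInfo, CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/true, Slot);
}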
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
X86AVXABILevel
The AVX ABI level for X86 targets.
bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyField - Return true iff the field is "empty", that is, it is an unnamed bit-field or an (arra...
llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
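The underlying computation is the standard power-of-two round-up, shown here as plain integer math:

#include <cstdint>

// Align must be a power of two.
std::uintptr_t roundUpTo(std::uintptr_t Ptr, std::uintptr_t Align) {
  return (Ptr + Align - 1) & ~(Align - 1);
}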
bool isAggregateTypeForABI(QualType T)
const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
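For reference, this is how a C caller declares a transparent union (a GCC extension, C only); the helper makes the callee see the first field's type:

typedef union {
  int *ip;
  const volatile int *cvip;
} __attribute__((transparent_union)) int_ptr_arg;

void take(int_ptr_arg p); /* lowered as if declared 'void take(int *p);' */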
std::unique_ptr< TargetCodeGenInfo > createX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters, bool SoftFloatABI)
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
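A concrete illustration of why these predicates matter, with hypothetical types:

struct Empty {};          // isEmptyRecord(...) is true for this
struct S {
  Empty tag;              // empty field: skipped by the ABI walk
  float value;            // the single remaining element
};
// isSingleElementStruct(S) can therefore return the 'float' type, so S
// may be passed or returned like a bare float rather than in memory.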
std::unique_ptr< TargetCodeGenInfo > createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool isSIMDVectorType(ASTContext &Context, QualType Ty)
bool isa(CodeGen::Address addr)
CallingConv
CallingConv - Specifies the calling convention that a function uses.
U cast(CodeGen::Address addr)
llvm::IntegerType * Int8Ty
llvm::IntegerType * Int32Ty
llvm::IntegerType * Int64Ty
i8, i16, i32, and i64