27 #include "llvm/ADT/SmallBitVector.h"
28 #include "llvm/ADT/StringExtras.h"
29 #include "llvm/ADT/StringSwitch.h"
30 #include "llvm/ADT/Triple.h"
31 #include "llvm/ADT/Twine.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/IntrinsicsNVPTX.h"
34 #include "llvm/IR/IntrinsicsS390.h"
35 #include "llvm/IR/Type.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
40 using namespace clang;
41 using namespace CodeGen;
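// TargetInfo.cpp: per-target ABI lowering for clang CodeGen. Each ABIInfo
// subclass below decides how argument and return types are classified,
// coerced to LLVM IR types, and passed in registers or memory for its
// calling convention.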
59 llvm::LLVMContext &LLVMContext) {
63 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
64 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
74 for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
76 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
88 llvm::Type *Padding) const {
137 unsigned maxAllRegisters) {
138 unsigned intCount = 0, fpCount = 0;
139 for (llvm::Type *type : scalarTypes) {
140 if (type->isPointerTy()) {
142 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
144 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
146 assert(type->isVectorTy() || type->isFloatingPointTy());
151 return (intCount + fpCount > maxAllRegisters);
156 unsigned numElts) const {
170 return CXXABI.getRecordArgABI(RD);
186 if (!isa<CXXRecordDecl>(RT->getDecl()) &&
187 !RT->getDecl()->canPassInRegisters()) {
192 return CXXABI.classifyReturnType(FI);
200 if (UD->hasAttr<TransparentUnionAttr>()) {
201 assert(!UD->field_empty() && "sema created an empty transparent union");
239 uint64_t Members) const {
244 raw_ostream &OS = llvm::errs();
245 OS << "(ABIArgInfo Kind=";
248 OS << "Direct Type=";
277 OS << "CoerceAndExpand Type=";
288 llvm::Value *PtrAsInt = Ptr;
291 PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
293 PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
295 PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
297 Ptr->getName() + ".aligned");
317 llvm::Type *DirectTy,
321 bool AllowHigherAlign) {
331 if (AllowHigherAlign && DirectAlign > SlotSize) {
347 !DirectTy->isStructTy()) {
372 bool AllowHigherAlign) {
379 DirectSize = ValueInfo.Width;
380 DirectAlign = ValueInfo.Align;
386 DirectTy = DirectTy->getPointerTo(0);
390 SlotSizeAndAlign, AllowHigherAlign);
404 SlotSize, SlotSize, true);
412 2 * SlotSize - EltSize);
430 Address Addr1, llvm::BasicBlock *Block1,
431 Address Addr2, llvm::BasicBlock *Block2,
432 const llvm::Twine &Name = "") {
434 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
485 return llvm::CallingConv::SPIR_KERNEL;
489 llvm::PointerType *T, QualType QT) const {
490 return llvm::ConstantPointerNull::get(T);
497 "Address space agnostic languages only");
503 LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
506 if (auto *C = dyn_cast<llvm::Constant>(Src))
510 Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
516 llvm::Type *DestTy) const {
519 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
525 llvm::AtomicOrdering Ordering,
526 llvm::LLVMContext &Ctx) const {
527 return Ctx.getOrInsertSyncScopeID("");
543 bool WasArray = false;
546 if (AT->getSize() == 0)
548 FT = AT->getElementType();
568 if (isa<CXXRecordDecl>(RT->getDecl()) &&
569 (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
587 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
588 for (const auto &I : CXXRD->bases())
592 for (const auto *I : RD->fields())
615 const Type *Found = nullptr;
618 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
619 for (const auto &I : CXXRD->bases()) {
637 for (const auto *FD : RD->fields()) {
651 if (AT->getSize().getZExtValue() != 1)
653 FT = AT->getElementType();
689 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
692 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
698 llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
701 return Address(Addr, ElementTy, TyAlignForABI);
704 "Unexpected ArgInfo Kind in generic VAArg emitter!");
707 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
709 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
711 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
713 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
727 class DefaultABIInfo : public ABIInfo {
762 return getNaturalAlignIndirect(Ty);
767 Ty = EnumTy->getDecl()->getIntegerType();
771 if (EIT->getNumBits() >
775 return getNaturalAlignIndirect(Ty);
786 return getNaturalAlignIndirect(RetTy);
790 RetTy = EnumTy->getDecl()->getIntegerType();
793 if (EIT->getNumBits() >
794 getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
795 ? getContext().Int128Ty
796 : getContext().LongLongTy))
797 return getNaturalAlignIndirect(RetTy);
817 DefaultABIInfo defaultInfo;
842 bool asReturnValue) const override {
846 bool isSwiftErrorInRegister() const override {
854 WebAssemblyABIInfo::ABIKind K)
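// WebAssembly: import/export attributes on the FunctionDecl are forwarded to
// the IR function as the "wasm-import-module", "wasm-import-name" and
// "wasm-export-name" string attributes so the backend emits the right names.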
857 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
860 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
861 if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
862 llvm::Function *Fn = cast<llvm::Function>(GV);
863 llvm::AttrBuilder B(GV->getContext());
864 B.addAttribute("wasm-import-module", Attr->getImportModule());
867 if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
868 llvm::Function *Fn = cast<llvm::Function>(GV);
869 llvm::AttrBuilder B(GV->getContext());
870 B.addAttribute("wasm-import-name", Attr->getImportName());
873 if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
874 llvm::Function *Fn = cast<llvm::Function>(GV);
875 llvm::AttrBuilder B(GV->getContext());
876 B.addAttribute("wasm-export-name", Attr->getExportName());
881 if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
882 llvm::Function *Fn = cast<llvm::Function>(GV);
883 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
884 Fn->addFnAttr("no-prototype");
907 if (Kind == ABIKind::ExperimentalMV) {
910 bool HasBitField = false;
912 if (Field->isBitField()) {
923 return defaultInfo.classifyArgumentType(Ty);
940 if (Kind == ABIKind::ExperimentalMV)
946 return defaultInfo.classifyReturnType(RetTy);
955 getContext().getTypeInfoInChars(Ty),
967 class PNaClABIInfo : public ABIInfo {
1009 return getNaturalAlignIndirect(Ty);
1012 Ty = EnumTy->getDecl()->getIntegerType();
1019 if (EIT->getNumBits() > 64)
1020 return getNaturalAlignIndirect(Ty);
1034 return getNaturalAlignIndirect(RetTy);
1038 if (EIT->getNumBits() > 64)
1039 return getNaturalAlignIndirect(RetTy);
1045 RetTy = EnumTy->getDecl()->getIntegerType();
1052 bool IsX86_MMXType(llvm::Type *IRType) {
1054 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
1055 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
1056 IRType->getScalarSizeInBits() != 64;
1060 StringRef Constraint,
1062 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
1063 .Cases("y", "&y", "^Ym", true)
1065 if (IsMMXCons && Ty->isVectorTy()) {
1066 if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
1083 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
1084 if (BT->getKind() == BuiltinType::LongDouble) {
1086 &llvm::APFloat::x87DoubleExtended())
1095 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
1103 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
1104 return NumMembers <= 4;
1108 static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
1122 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}
1124 llvm::SmallBitVector IsPreassigned;
1126 unsigned FreeRegs = 0;
1127 unsigned FreeSSERegs = 0;
1137 static const unsigned MinABIStackAlignInBytes = 4;
1139 bool IsDarwinVectorABI;
1140 bool IsRetSmallStructInRegABI;
1141 bool IsWin32StructABI;
1142 bool IsSoftFloatABI;
1145 unsigned DefaultNumRegisterParameters;
1147 static bool isRegisterSize(unsigned Size) {
1148 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
1151 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
1153 return isX86VectorTypeForVectorCall(getContext(), Ty);
1156 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
1157 uint64_t NumMembers) const override {
1159 return isX86VectorCallAggregateSmallEnough(NumMembers);
1171 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
1181 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
1182 bool &NeedsPadding) const;
1183 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
1185 bool canExpandIndirectArgument(QualType Ty) const;
1203 bool RetSmallStructInRegABI, bool Win32StructABI,
1204 unsigned NumRegisterParameters, bool SoftFloatABI)
1205 : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1206 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1207 IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
1208 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
1209 IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
1210 CGT.getTarget().getTriple().isOSCygMing()),
1211 DefaultNumRegisterParameters(NumRegisterParameters) {}
1214 bool asReturnValue) const override {
1222 bool isSwiftErrorInRegister() const override {
1231 bool RetSmallStructInRegABI, bool Win32StructABI,
1232 unsigned NumRegisterParameters, bool SoftFloatABI)
1234 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1235 NumRegisterParameters, SoftFloatABI)) {}
1237 static bool isStructReturnInRegABI(
1240 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1250 llvm::Value *Address) const override;
1253 StringRef Constraint,
1254 llvm::Type* Ty) const override {
1255 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1260 std::vector<llvm::Type *> &ResultRegTypes,
1261 std::vector<llvm::Type *> &ResultTruncRegTypes,
1262 std::vector<LValue> &ResultRegDests,
1264 unsigned NumOutputs) const override;
1268 unsigned Sig = (0xeb << 0) |
1272 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1275 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
1276 return "movl\t%ebp, %ebp"
1277 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1292 unsigned NumNewOuts,
1295 llvm::raw_string_ostream OS(Buf);
1297 while (Pos < AsmString.size()) {
1298 size_t DollarStart = AsmString.find('$', Pos);
1299 if (DollarStart == std::string::npos)
1300 DollarStart = AsmString.size();
1301 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
1302 if (DollarEnd == std::string::npos)
1303 DollarEnd = AsmString.size();
1304 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1306 size_t NumDollars = DollarEnd - DollarStart;
1307 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1309 size_t DigitStart = Pos;
1310 if (AsmString[DigitStart] == '{') {
1314 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
1315 if (DigitEnd == std::string::npos)
1316 DigitEnd = AsmString.size();
1317 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1318 unsigned OperandIndex;
1319 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1320 if (OperandIndex >= FirstIn)
1321 OperandIndex += NumNewOuts;
1329 AsmString = std::move(OS.str());
1333 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1335 std::vector<llvm::Type *> &ResultRegTypes,
1336 std::vector<llvm::Type *> &ResultTruncRegTypes,
1337 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1338 unsigned NumOutputs) const {
1343 if (!Constraints.empty())
1345 if (RetWidth <= 32) {
1346 Constraints += "={eax}";
1347 ResultRegTypes.push_back(CGF.Int32Ty);
1350 Constraints += "=A";
1351 ResultRegTypes.push_back(CGF.Int64Ty);
1355 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
1356 ResultTruncRegTypes.push_back(CoerceTy);
1361 ResultRegDests.push_back(ReturnSlot);
1368 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
1374 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1380 if (Size == 64 || Size == 128)
1395 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1399 if (!RT) return false;
1411 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1420 Ty = CTy->getElementType();
1430 return Size == 32 || Size == 64;
1435 for (const auto *FD : RD->fields()) {
1445 if (FD->isBitField())
1470 bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
1477 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1478 if (!IsWin32StructABI) {
1481 if (!CXXRD->isCLike())
1485 if (CXXRD->isDynamicClass())
1496 return Size == getContext().getTypeSize(Ty);
1502 if (State.FreeRegs) {
1505 return getNaturalAlignIndirectInReg(RetTy);
1507 return getNaturalAlignIndirect(RetTy, false);
1511 CCState &State) const {
1516 uint64_t NumElts = 0;
1517 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1518 State.CC == llvm::CallingConv::X86_RegCall) &&
1519 isHomogeneousAggregate(RetTy, Base, NumElts)) {
1526 if (IsDarwinVectorABI) {
1527 uint64_t Size = getContext().getTypeSize(RetTy);
1534 llvm::Type::getInt64Ty(getVMContext()), 2));
1538 if ((Size == 8 || Size == 16 || Size == 32) ||
1539 (Size == 64 && VT->getNumElements() == 1))
1543 return getIndirectReturnResult(RetTy, State);
1553 return getIndirectReturnResult(RetTy, State);
1558 return getIndirectReturnResult(RetTy, State);
1569 llvm::Type::getHalfTy(getVMContext()), 2));
1574 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1575 uint64_t Size = getContext().getTypeSize(RetTy);
1583 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1584 || SeltTy->hasPointerRepresentation())
1592 return getIndirectReturnResult(RetTy, State);
1597 RetTy = EnumTy->getDecl()->getIntegerType();
1600 if (EIT->getNumBits() > 64)
1601 return getIndirectReturnResult(RetTy, State);
1618 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1619 for (const auto &I : CXXRD->bases())
1623 for (const auto *i : RD->fields()) {
1636 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
1637 unsigned Align) const {
1640 if (Align <= MinABIStackAlignInBytes)
1648 if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
1652 if (!IsDarwinVectorABI) {
1654 return MinABIStackAlignInBytes;
1662 return MinABIStackAlignInBytes;
1666 CCState &State) const {
1668 if (State.FreeRegs) {
1671 return getNaturalAlignIndirectInReg(Ty);
1673 return getNaturalAlignIndirect(Ty, false);
1677 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1678 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1679 if (StackAlign == 0)
1684 bool Realign = TypeAlign > StackAlign;
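// x86-32: updateFreeRegs consumes the available inreg registers in 32-bit
// units (EAX/EDX/ECX, depending on the convention); an argument is only
// passed in registers if every unit it needs still fits in the budget.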
1689 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
1702 bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
1703 if (!IsSoftFloatABI) {
1709 unsigned Size = getContext().getTypeSize(Ty);
1710 unsigned SizeInRegs = (Size + 31) / 32;
1712 if (SizeInRegs == 0)
1716 if (SizeInRegs > State.FreeRegs) {
1725 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1729 State.FreeRegs -= SizeInRegs;
1733 bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
1735 bool &NeedsPadding) const {
1742 NeedsPadding = false;
1745 if (!updateFreeRegs(Ty, State))
1751 if (State.CC == llvm::CallingConv::X86_FastCall ||
1752 State.CC == llvm::CallingConv::X86_VectorCall ||
1753 State.CC == llvm::CallingConv::X86_RegCall) {
1754 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1755 NeedsPadding = true;
1763 bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
1764 if (!updateFreeRegs(Ty, State))
1770 if (State.CC == llvm::CallingConv::X86_FastCall ||
1771 State.CC == llvm::CallingConv::X86_VectorCall ||
1772 State.CC == llvm::CallingConv::X86_RegCall) {
1773 if (getContext().getTypeSize(Ty) > 32)
1794 for (int I = 0, E = Args.size(); I < E; ++I) {
1796 uint64_t NumElts = 0;
1799 isHomogeneousAggregate(Ty, Base, NumElts)) {
1800 if (State.FreeSSERegs >= NumElts) {
1801 State.FreeSSERegs -= NumElts;
1803 State.IsPreassigned.set(I);
1810 CCState &State) const {
1812 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
1813 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
1814 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
1817 TypeInfo TI = getContext().getTypeInfo(Ty);
1824 return getIndirectResult(Ty, false, State);
1834 uint64_t NumElts = 0;
1835 if ((IsRegCall || IsVectorCall) &&
1836 isHomogeneousAggregate(Ty, Base, NumElts)) {
1837 if (State.FreeSSERegs >= NumElts) {
1838 State.FreeSSERegs -= NumElts;
1843 return getDirectX86Hva();
1849 return getIndirectResult(Ty, false, State);
1856 return getIndirectResult(Ty, true, State);
1859 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1862 llvm::LLVMContext &LLVMContext = getVMContext();
1863 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1864 bool NeedsPadding = false;
1866 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1867 unsigned SizeInRegs = (TI.Width + 31) / 32;
1869 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1875 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1880 return getIndirectResult(Ty, false, State);
1888 if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
1889 canExpandIndirectArgument(Ty))
1891 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
1893 return getIndirectResult(Ty, true, State);
1900 if (IsWin32StructABI) {
1901 if (TI.Width <= 512 && State.FreeSSERegs > 0) {
1902 --State.FreeSSERegs;
1905 return getIndirectResult(Ty, false, State);
1910 if (IsDarwinVectorABI) {
1912 (TI.Width == 64 && VT->getNumElements() == 1))
1914 llvm::IntegerType::get(getVMContext(), TI.Width));
1917 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1925 Ty = EnumTy->getDecl()->getIntegerType();
1927 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1929 if (isPromotableIntegerTypeForABI(Ty)) {
1936 if (EIT->getNumBits() <= 64) {
1941 return getIndirectResult(Ty, false, State);
1953 else if (
State.CC == llvm::CallingConv::X86_FastCall) {
1955 State.FreeSSERegs = 3;
1956 }
else if (
State.CC == llvm::CallingConv::X86_VectorCall) {
1958 State.FreeSSERegs = 6;
1961 else if (
State.CC == llvm::CallingConv::X86_RegCall) {
1963 State.FreeSSERegs = 8;
1964 }
else if (IsWin32StructABI) {
1967 State.FreeRegs = DefaultNumRegisterParameters;
1968 State.FreeSSERegs = 3;
1970 State.FreeRegs = DefaultNumRegisterParameters;
1977 if (State.FreeRegs) {
1990 if (State.CC == llvm::CallingConv::X86_VectorCall)
1991 runVectorCallFirstPass(FI, State);
1993 bool UsedInAlloca = false;
1995 for (int I = 0, E = Args.size(); I < E; ++I) {
1997 if (State.IsPreassigned.test(I))
2007 rewriteWithInAlloca(FI);
2016 assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
2021 bool IsIndirect = false;
2025 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
2027 LLTy = LLTy->getPointerTo(0);
2028 FrameFields.push_back(LLTy);
2029 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
2033 StackOffset = FieldEnd.alignTo(WordSize);
2034 if (StackOffset != FieldEnd) {
2035 CharUnits NumBytes = StackOffset - FieldEnd;
2036 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
2037 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
2038 FrameFields.push_back(Ty);
2060 llvm_unreachable("invalid enum");
2063 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
2064 assert(IsWin32StructABI && "inalloca only supported on win32");
2079 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
2081 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2086 if (Ret.isIndirect() && !Ret.getInReg()) {
2089 Ret.setInAllocaSRet(IsWin32StructABI);
2097 for (; I != E; ++I) {
2099 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2102 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
2110 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
2117 getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
2124 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
2126 assert(Triple.getArch() == llvm::Triple::x86);
2128 switch (Opts.getStructReturnConvention()) {
2137 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
2140 switch (Triple.getOS()) {
2141 case llvm::Triple::DragonFly:
2142 case llvm::Triple::FreeBSD:
2143 case llvm::Triple::OpenBSD:
2144 case llvm::Triple::Win32:
2153 if (!FD->
hasAttr<AnyX86InterruptAttr>())
2156 llvm::Function *Fn = cast<llvm::Function>(GV);
2157 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2163 llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
2164 Fn->getContext(), ByValTy);
2165 Fn->addParamAttr(0, NewAttr);
2168 void X86_32TargetCodeGenInfo::setTargetAttributes(
2170 if (GV->isDeclaration())
2172 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2173 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2174 llvm::Function *Fn = cast<llvm::Function>(GV);
2175 Fn->addFnAttr("stackrealign");
2182 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
2187 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2198 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2204 Builder.CreateAlignedStore(
2205 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2211 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2225 enum class X86AVXABILevel {
2232 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2234 case X86AVXABILevel::AVX512:
2236 case X86AVXABILevel::AVX:
2241 llvm_unreachable(
"Unknown AVXLevel");
2266 static Class merge(Class Accum, Class Field);
2282 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2310 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2311 bool isNamedArg, bool IsRegCall = false) const;
2313 llvm::Type *GetByteVectorType(QualType Ty) const;
2314 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2315 unsigned IROffset, QualType SourceTy,
2316 unsigned SourceOffset) const;
2317 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2318 unsigned IROffset, QualType SourceTy,
2319 unsigned SourceOffset) const;
2335 unsigned &neededInt, unsigned &neededSSE,
2337 bool IsRegCall = false) const;
2340 unsigned &NeededSSE,
2341 unsigned &MaxVectorWidth) const;
2344 unsigned &NeededSSE,
2345 unsigned &MaxVectorWidth) const;
2347 bool IsIllegalVectorType(QualType Ty) const;
2354 bool honorsRevision0_98() const {
2355 return !getTarget().getTriple().isOSDarwin();
2360 bool classifyIntegerMMXAsSSE()
const {
2362 if (getContext().getLangOpts().getClangABICompat() <=
2366 const llvm::Triple &Triple = getTarget().getTriple();
2367 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2369 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2375 bool passInt128VectorsInMem()
const {
2377 if (getContext().getLangOpts().getClangABICompat() <=
2381 const llvm::Triple &T = getTarget().getTriple();
2382 return T.isOSLinux() || T.isOSNetBSD();
2385 X86AVXABILevel AVXLevel;
2388 bool Has64BitPointers;
2393 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2397 unsigned neededInt, neededSSE;
2403 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2404 return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
2416 bool has64BitPointers() const {
2417 return Has64BitPointers;
2421 bool asReturnValue) const override {
2424 bool isSwiftErrorInRegister() const override {
2434 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2441 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2443 return isX86VectorTypeForVectorCall(getContext(), Ty);
2446 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2447 uint64_t NumMembers) const override {
2449 return isX86VectorCallAggregateSmallEnough(NumMembers);
2453 bool asReturnValue) const override {
2457 bool isSwiftErrorInRegister() const override {
2463 bool IsVectorCall, bool IsRegCall) const;
2467 X86AVXABILevel AVXLevel;
2477 const X86_64ABIInfo &getABIInfo() const {
2483 bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
2490 llvm::Value *Address) const override {
2491 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2500 StringRef Constraint,
2501 llvm::Type* Ty) const override {
2502 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2505 bool isNoProtoCallVariadic(const CallArgList &args,
2514 bool HasAVXType = false;
2515 for (CallArgList::const_iterator
2516 it = args.begin(), ie = args.end(); it != ie; ++it) {
2517 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2532 unsigned Sig = (0xeb << 0) |
2536 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
2539 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2541 if (GV->isDeclaration())
2543 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2544 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2545 llvm::Function *Fn = cast<llvm::Function>(GV);
2546 Fn->addFnAttr("stackrealign");
2559 static void initFeatureMaps(const ASTContext &Ctx,
2560 llvm::StringMap<bool> &CallerMap,
2562 llvm::StringMap<bool> &CalleeMap,
2564 if (CalleeMap.empty() && CallerMap.empty()) {
2575 const llvm::StringMap<bool> &CallerMap,
2576 const llvm::StringMap<bool> &CalleeMap,
2579 bool CallerHasFeat = CallerMap.lookup(Feature);
2580 bool CalleeHasFeat = CalleeMap.lookup(Feature);
2581 if (!CallerHasFeat && !CalleeHasFeat)
2582 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
2583 << IsArgument << Ty << Feature;
2586 if (!CallerHasFeat || !CalleeHasFeat)
2587 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
2588 << IsArgument << Ty << Feature;
2597 const llvm::StringMap<bool> &CallerMap,
2598 const llvm::StringMap<bool> &CalleeMap, QualType Ty,
2602 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
2603 "avx512f", IsArgument);
2606 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
2612 void X86_64TargetCodeGenInfo::checkFunctionCallABI(
2615 llvm::StringMap<bool> CallerMap;
2616 llvm::StringMap<bool> CalleeMap;
2617 unsigned ArgIndex = 0;
2621 for (const CallArg &Arg : Args) {
2629 if (Arg.getType()->isVectorType() &&
2631 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2635 if (ArgIndex < Callee->getNumParams())
2636 Ty = Callee->getParamDecl(ArgIndex)->getType();
2639 CalleeMap, Ty, true))
2647 if (Callee->getReturnType()->isVectorType() &&
2649 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2651 CalleeMap, Callee->getReturnType(),
2656 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2660 bool Quote = Lib.contains(' ');
2663 if (!Lib.endswith_insensitive(".lib") && !Lib.endswith_insensitive(".a"))
2665 ArgStr += Quote ? "\"" : "";
2669 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2672 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2673 unsigned NumRegisterParameters)
2674 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2675 Win32StructABI, NumRegisterParameters, false) {}
2677 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2680 void getDependentLibraryOption(llvm::StringRef Lib,
2682 Opt = "/DEFAULTLIB:";
2683 Opt += qualifyWindowsLibrary(Lib);
2686 void getDetectMismatchOption(llvm::StringRef Name,
2687 llvm::StringRef Value,
2689 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2693 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2695 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2698 Fn->addFnAttr("stack-probe-size",
2701 Fn->addFnAttr("no-stack-arg-probe");
2705 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2707 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2708 if (GV->isDeclaration())
2710 addStackProbeTargetAttributes(D, GV, CGM);
2716 X86AVXABILevel AVXLevel)
2719 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2727 llvm::Value *Address) const override {
2728 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2736 void getDependentLibraryOption(llvm::StringRef Lib,
2738 Opt = "/DEFAULTLIB:";
2739 Opt += qualifyWindowsLibrary(Lib);
2742 void getDetectMismatchOption(llvm::StringRef Name,
2743 llvm::StringRef Value,
2745 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2749 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2752 if (GV->isDeclaration())
2754 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2755 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2756 llvm::Function *Fn = cast<llvm::Function>(GV);
2757 Fn->addFnAttr("stackrealign");
2763 addStackProbeTargetAttributes(D, GV, CGM);
2767 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2792 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2794 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2796 if (Hi == SSEUp && Lo != SSE)
2800 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2824 assert((Accum != Memory && Accum != ComplexX87) &&
2825 "Invalid accumulated classification during merge.");
2826 if (Accum == Field || Field == NoClass)
2828 if (Field == Memory)
2830 if (Accum == NoClass)
2832 if (Accum == Integer || Field == Integer)
2834 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2835 Accum == X87 || Accum == X87Up)
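// SysV x86-64 classification: a type is split into 8-byte "eightbytes", each
// classified as INTEGER, SSE, SSEUP, X87, X87UP, COMPLEX_X87 or MEMORY.
// merge() combines the classes of overlapping fields (MEMORY wins, INTEGER
// beats SSE), and postMerge() applies the whole-aggregate cleanup rules.
// For example, struct { double d; int i; } classifies as {SSE, INTEGER} and
// is passed in one XMM register and one GPR.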
2840 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
2841 Class &Hi, bool isNamedArg, bool IsRegCall) const {
2852 Class &Current = OffsetBase < 64 ? Lo : Hi;
2858 if (k == BuiltinType::Void) {
2860 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2863 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2866 k == BuiltinType::Float16) {
2868 } else if (k == BuiltinType::LongDouble) {
2869 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2870 if (LDF == &llvm::APFloat::IEEEquad()) {
2873 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2876 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2879 llvm_unreachable("unexpected long double representation!");
2888 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2899 if (Has64BitPointers) {
2906 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2907 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2908 if (EB_FuncPtr != EB_ThisAdj) {
2921 uint64_t Size = getContext().getTypeSize(VT);
2922 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2931 uint64_t EB_Lo = (OffsetBase) / 64;
2932 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2935 } else if (Size == 64) {
2936 QualType ElementType = VT->getElementType();
2945 if (!classifyIntegerMMXAsSSE() &&
2956 if (OffsetBase && OffsetBase != 64)
2958 } else if (Size == 128 ||
2959 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2960 QualType ElementType = VT->getElementType();
2963 if (passInt128VectorsInMem() && Size != 128 &&
2991 uint64_t Size = getContext().getTypeSize(Ty);
2995 else if (Size <= 128)
2997 } else if (ET->isFloat16Type() || ET == getContext().FloatTy) {
2999 } else if (ET == getContext().DoubleTy) {
3001 } else if (ET == getContext().LongDoubleTy) {
3002 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3003 if (LDF == &llvm::APFloat::IEEEquad())
3005 else if (LDF == &llvm::APFloat::x87DoubleExtended())
3006 Current = ComplexX87;
3007 else if (LDF == &llvm::APFloat::IEEEdouble())
3010 llvm_unreachable("unexpected long double representation!");
3015 uint64_t EB_Real = (OffsetBase) / 64;
3016 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
3017 if (Hi == NoClass && EB_Real != EB_Imag)
3024 if (EITy->getNumBits() <= 64)
3026 else if (EITy->getNumBits() <= 128)
3035 uint64_t
Size = getContext().getTypeSize(Ty);
3041 if (!IsRegCall && Size > 512)
3048 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
3054 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
3055 uint64_t ArraySize = AT->getSize().getZExtValue();
3062 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
3065 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
3066 Class FieldLo, FieldHi;
3067 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
3068 Lo = merge(Lo, FieldLo);
3069 Hi = merge(Hi, FieldHi);
3070 if (Lo == Memory || Hi == Memory)
3074 postMerge(Size, Lo, Hi);
3075 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
3080 uint64_t Size = getContext().getTypeSize(Ty);
3105 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3106 for (const auto &I : CXXRD->bases()) {
3107 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3108 "Unexpected base class!");
3117 Class FieldLo, FieldHi;
3120 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
3121 Lo = merge(Lo, FieldLo);
3122 Hi = merge(Hi, FieldHi);
3123 if (Lo == Memory || Hi == Memory) {
3124 postMerge(Size, Lo, Hi);
3132 bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
3134 getContext().getTargetInfo().getTriple().isPS4();
3135 bool IsUnion = RT->
isUnionType() && !UseClang11Compat;
3138 i != e; ++i, ++idx) {
3140 bool BitField = i->isBitField();
3143 if (BitField && i->isUnnamedBitfield())
3156 ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
3157 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
3159 postMerge(Size, Lo, Hi);
3163 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
3165 postMerge(Size, Lo, Hi);
3175 Class FieldLo, FieldHi;
3181 assert(!i->isUnnamedBitfield());
3183 uint64_t Size = i->getBitWidthValue(getContext());
3185 uint64_t EB_Lo = Offset / 64;
3189 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
3194 FieldHi = EB_Hi ? Integer : NoClass;
3197 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
3198 Lo = merge(Lo, FieldLo);
3199 Hi = merge(Hi, FieldHi);
3200 if (Lo == Memory || Hi == Memory)
3204 postMerge(Size, Lo, Hi);
3214 Ty = EnumTy->getDecl()->getIntegerType();
3217 return getNaturalAlignIndirect(Ty);
3223 return getNaturalAlignIndirect(Ty);
3226 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
3228 uint64_t Size = getContext().getTypeSize(VecTy);
3229 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
3230 if (Size <= 64 || Size > LargestVector)
3232 QualType EltTy = VecTy->getElementType();
3233 if (passInt128VectorsInMem() &&
3243 unsigned freeIntRegs) const {
3256 Ty = EnumTy->getDecl()->getIntegerType();
3267 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
3290 if (freeIntRegs == 0) {
3291 uint64_t Size = getContext().getTypeSize(Ty);
3295 if (Align == 8 && Size <= 64)
3305 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
3311 llvm::Type *IRType = CGT.ConvertType(Ty);
3312 if (isa<llvm::VectorType>(IRType)) {
3315 if (passInt128VectorsInMem() &&
3316 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
3318 uint64_t Size = getContext().getTypeSize(Ty);
3319 return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
3326 if (IRType->getTypeID() == llvm::Type::FP128TyID)
3330 uint64_t Size = getContext().getTypeSize(Ty);
3331 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
3335 return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
3352 if (TySize <= StartBit)
3357 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
3360 for (unsigned i = 0; i != NumElts; ++i) {
3362 unsigned EltOffset = i*EltSize;
3363 if (EltOffset >= EndBit) break;
3365 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3367 EndBit-EltOffset, Context))
3379 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3380 for (const auto &I : CXXRD->bases()) {
3381 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3382 "Unexpected base class!");
3388 if (BaseOffset >= EndBit) continue;
3390 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3392 EndBit-BaseOffset, Context))
3403 i != e; ++i, ++idx) {
3407 if (FieldOffset >= EndBit)
break;
3409 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3425 const llvm::DataLayout &TD) {
3426 if (IROffset == 0 && IRType->isFloatingPointTy())
3430 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3431 if (!STy->getNumContainedTypes())
3434 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3435 unsigned Elt = SL->getElementContainingOffset(IROffset);
3436 IROffset -= SL->getElementOffset(Elt);
3441 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3442 llvm::Type *EltTy = ATy->getElementType();
3443 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3444 IROffset -= IROffset / EltSize * EltSize;
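// GetSSETypeAtOffset: picks the IR type used to pass one SSE-classified
// eightbyte, preferring half/float pairs packed into a small vector when both
// halves of the eightbyte hold them, and falling back to double otherwise.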
3453 llvm::Type *X86_64ABIInfo::
3454 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3455 QualType SourceTy, unsigned SourceOffset) const {
3456 const llvm::DataLayout &TD = getDataLayout();
3457 unsigned SourceSize =
3458 (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
3460 if (!T0 || T0->isDoubleTy())
3461 return llvm::Type::getDoubleTy(getVMContext());
3464 llvm::Type *T1 = nullptr;
3465 unsigned T0Size = TD.getTypeAllocSize(T0);
3466 if (SourceSize > T0Size)
3468 if (T1 == nullptr) {
3471 if (T0->isHalfTy() && SourceSize > 4)
3480 if (T0->isFloatTy() && T1->isFloatTy())
3481 return llvm::FixedVectorType::get(T0, 2);
3483 if (T0->isHalfTy() && T1->isHalfTy()) {
3484 llvm::Type *T2 =
nullptr;
3488 return llvm::FixedVectorType::get(T0, 2);
3489 return llvm::FixedVectorType::get(T0, 4);
3492 if (T0->isHalfTy() || T1->isHalfTy())
3493 return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);
3495 return llvm::Type::getDoubleTy(getVMContext());
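// GetINTEGERTypeAtOffset: returns the IR type used for an INTEGER-classified
// eightbyte. It reuses i8/i16/i32/i64 or a pointer from the converted IR type
// when that exactly covers the bytes in question, otherwise it falls back to
// an integer wide enough for the remaining bytes (at most 8).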
3513 llvm::Type *X86_64ABIInfo::
3514 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3515 QualType SourceTy, unsigned SourceOffset) const {
3518 if (IROffset == 0) {
3520 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3521 IRType->isIntegerTy(64))
3530 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3531 IRType->isIntegerTy(32) ||
3532 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3533 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3534 cast<llvm::IntegerType>(IRType)->getBitWidth();
3537 SourceOffset*8+64, getContext()))
3542 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3544 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3545 if (IROffset < SL->getSizeInBytes()) {
3546 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3547 IROffset -= SL->getElementOffset(FieldIdx);
3549 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3550 SourceTy, SourceOffset);
3554 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3555 llvm::Type *EltTy = ATy->getElementType();
3556 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3557 unsigned EltOffset = IROffset/EltSize*EltSize;
3558 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3564 unsigned TySizeInBytes =
3565 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3567 assert(TySizeInBytes != SourceOffset && "Empty field?");
3571 return llvm::IntegerType::get(getVMContext(),
3572 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3583 const llvm::DataLayout &TD) {
3588 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3589 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3590 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3591 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3603 if (Lo->isHalfTy() || Lo->isFloatTy())
3604 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3606 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3607 && "Invalid/unknown lo type");
3608 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3612 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3615 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3616 "Invalid x86-64 argument pair!");
3624 X86_64ABIInfo::Class Lo, Hi;
3625 classify(RetTy, 0, Lo, Hi, true);
3628 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3629 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3631 llvm::Type *ResType = nullptr;
3638 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3639 "Unknown missing lo part");
3644 llvm_unreachable("Invalid classification for lo word.");
3649 return getIndirectReturnResult(RetTy);
3654 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3658 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3661 RetTy = EnumTy->getDecl()->getIntegerType();
3664 isPromotableIntegerTypeForABI(RetTy))
3672 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3678 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3685 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3686 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3687 llvm::Type::getX86_FP80Ty(getVMContext()));
3691 llvm::Type *HighPart = nullptr;
3697 llvm_unreachable("Invalid classification for hi word.");
3704 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3709 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3720 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
3721 ResType = GetByteVectorType(RetTy);
3732 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3750 unsigned &neededInt, unsigned &neededSSE,
3751 bool isNamedArg, bool IsRegCall) const {
3754 X86_64ABIInfo::Class Lo, Hi;
3755 classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
3759 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3760 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3764 llvm::Type *ResType = nullptr;
3771 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3772 "Unknown missing lo part");
3785 return getIndirectResult(Ty, freeIntRegs);
3789 llvm_unreachable(
"Invalid classification for lo word.");
3798 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3802 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3805 Ty = EnumTy->getDecl()->getIntegerType();
3808 isPromotableIntegerTypeForABI(Ty))
3818 llvm::Type *IRType = CGT.ConvertType(Ty);
3819 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3825 llvm::Type *HighPart = nullptr;
3833 llvm_unreachable("Invalid classification for hi word.");
3835 case NoClass: break;
3840 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3850 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3862 assert(Lo == SSE &&
"Unexpected SSEUp classification");
3863 ResType = GetByteVectorType(Ty);
3877 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3878 unsigned &NeededSSE,
3879 unsigned &MaxVectorWidth) const {
3881 assert(RT && "classifyRegCallStructType only valid with struct types");
3884 return getIndirectReturnResult(Ty);
3887 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3888 if (CXXRD->isDynamicClass()) {
3889 NeededInt = NeededSSE = 0;
3890 return getIndirectReturnResult(Ty);
3893 for (
const auto &I : CXXRD->bases())
3894 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
3897 NeededInt = NeededSSE = 0;
3898 return getIndirectReturnResult(Ty);
3906 if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
3909 NeededInt = NeededSSE = 0;
3910 return getIndirectReturnResult(Ty);
3913 unsigned LocalNeededInt, LocalNeededSSE;
3917 NeededInt = NeededSSE = 0;
3918 return getIndirectReturnResult(Ty);
3920 if (
const auto *AT = getContext().getAsConstantArrayType(MTy))
3921 MTy = AT->getElementType();
3923 if (getContext().getTypeSize(VT) > MaxVectorWidth)
3924 MaxVectorWidth = getContext().getTypeSize(VT);
3925 NeededInt += LocalNeededInt;
3926 NeededSSE += LocalNeededSSE;
3934 X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
3935 unsigned &NeededSSE,
3936 unsigned &MaxVectorWidth) const {
3942 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
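// SysV x86-64 passes arguments in up to 6 integer registers (rdi, rsi, rdx,
// rcx, r8, r9) and 8 SSE registers; __regcall raises the budget to 11 integer
// and 16 SSE registers. Once a classified argument no longer fits in the
// remaining budget, it is passed in memory via getIndirectResult().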
3953 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
3954 Win64ABIInfo.computeInfo(FI);
3958 bool IsRegCall =
CallingConv == llvm::CallingConv::X86_RegCall;
3961 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3962 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3963 unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
3970 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3971 FreeIntRegs -= NeededInt;
3972 FreeSSERegs -= NeededSSE;
3980 getContext().LongDoubleTy)
3992 else if (NeededSSE && MaxVectorWidth > 0)
4004 it != ie; ++it, ++ArgNo) {
4005 bool IsNamedArg = ArgNo < NumRequiredArgs;
4007 if (IsRegCall && it->type->isStructureOrClassType())
4008 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
4012 NeededSSE, IsNamedArg);
4018 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
4019 FreeIntRegs -= NeededInt;
4020 FreeSSERegs -= NeededSSE;
4024 it->info = getIndirectResult(it->type, FreeIntRegs);
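// va_arg on SysV x86-64: the va_list records gp_offset/fp_offset into the
// register save area plus an overflow_arg_area pointer; values that were
// passed in memory (or no longer fit in the saved registers) are read from
// the overflow area, which is kept 8-byte aligned.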
4033 llvm::Value *overflow_arg_area =
4049 CGF.Builder.CreateBitCast(overflow_arg_area,
4050 llvm::PointerType::getUnqual(LTy));
4059 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
4061 Offset, "overflow_arg_area.next");
4065 return Address(Res, LTy, Align);
4077 unsigned neededInt, neededSSE;
4085 if (!neededInt && !neededSSE)
4099 llvm::Value *InRegs = nullptr;
4101 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
4105 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
4106 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
4112 llvm::Value *FitsInFP =
4113 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
4114 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
4115 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
4121 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4142 if (neededInt && neededSSE) {
4144 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
4148 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
4149 llvm::Type *TyLo = ST->getElementType(0);
4150 llvm::Type *TyHi = ST->getElementType(1);
4151 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
4152 "Unexpected ABI info for mixed regs");
4153 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
4154 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
4155 llvm::Value *GPAddr =
4157 llvm::Value *FPAddr =
4159 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
4160 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
4165 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
4171 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
4176 } else if (neededInt) {
4182 auto TInfo = getContext().getTypeInfoInChars(Ty);
4183 uint64_t TySize = TInfo.Width.getQuantity();
4194 } else if (neededSSE == 1) {
4199 assert(neededSSE == 2 && "Invalid number of needed registers!");
4219 RegAddrLo, ST->getStructElementType(0)));
4222 RegAddrHi, ST->getStructElementType(1)));
4232 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
4237 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
4260 uint64_t Width = getContext().getTypeSize(Ty);
4261 bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4269 ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
4272 uint64_t NumElts = 0;
4275 isHomogeneousAggregate(Ty,
Base, NumElts) && FreeSSERegs >= NumElts) {
4276 FreeSSERegs -= NumElts;
4277 return getDirectX86Hva();
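// Win64: an argument occupies a single register or a single stack slot;
// anything wider than 64 bits or not a power-of-two size is passed
// indirectly by pointer. vectorcall/regcall additionally allow homogeneous
// vector aggregates to use the remaining XMM registers.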
4283 bool IsReturnType, bool IsVectorCall,
4284 bool IsRegCall) const {
4290 Ty = EnumTy->getDecl()->getIntegerType();
4292 TypeInfo Info = getContext().getTypeInfo(Ty);
4293 uint64_t Width = Info.Width;
4298 if (!IsReturnType) {
4304 return getNaturalAlignIndirect(Ty, false);
4309 uint64_t NumElts = 0;
4312 if ((IsVectorCall || IsRegCall) &&
4313 isHomogeneousAggregate(Ty, Base, NumElts)) {
4315 if (FreeSSERegs >= NumElts) {
4316 FreeSSERegs -= NumElts;
4322 } else if (IsVectorCall) {
4323 if (FreeSSERegs >= NumElts &&
4325 FreeSSERegs -= NumElts;
4327 } else if (IsReturnType) {
4339 llvm::Type *LLTy = CGT.ConvertType(Ty);
4340 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
4347 if (Width > 64 || !llvm::isPowerOf2_64(Width))
4348 return getNaturalAlignIndirect(Ty,
false);
4355 switch (BT->getKind()) {
4356 case BuiltinType::Bool:
4361 case BuiltinType::LongDouble:
4365 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
4366 if (LDF == &llvm::APFloat::x87DoubleExtended())
4371 case BuiltinType::Int128:
4372 case BuiltinType::UInt128:
4382 llvm::Type::getInt64Ty(getVMContext()), 2));
4405 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
4406 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
4410 if (CC == llvm::CallingConv::X86_64_SysV) {
4411 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
4412 SysVABIInfo.computeInfo(FI);
4416 unsigned FreeSSERegs = 0;
4420 } else if (IsRegCall) {
4427 IsVectorCall, IsRegCall);
4432 } else if (IsRegCall) {
4437 unsigned ArgNum = 0;
4438 unsigned ZeroSSERegs = 0;
4443 unsigned *MaybeFreeSSERegs =
4444 (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
4446 classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
4454 I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
4462 uint64_t Width = getContext().getTypeSize(Ty);
4463 bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4472 llvm::Value *Address, bool Is64Bit,
4479 llvm::IntegerType *i8 = CGF.Int8Ty;
4480 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4481 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4482 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4535 class AIXABIInfo : public ABIInfo {
4537 const unsigned PtrByteSize;
4542 : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
4544 bool isPromotableTypeForABI(QualType Ty) const;
4573 llvm::Value *Address) const override;
4579 bool AIXABIInfo::isPromotableTypeForABI(
QualType Ty)
const {
4582 Ty = EnumTy->getDecl()->getIntegerType();
4595 switch (BT->getKind()) {
4596 case BuiltinType::Int:
4597 case BuiltinType::UInt:
4617 return getNaturalAlignIndirect(RetTy);
4638 CharUnits CCAlign = getParamTypeAlignment(Ty);
4639 CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
4652 Ty = CTy->getElementType();
4667 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4681 if (EltSize < SlotSize)
4689 bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
4697 class PPC32_SVR4_ABIInfo :
public DefaultABIInfo {
4698 bool IsSoftFloatABI;
4699 bool IsRetSmallStructInRegABI;
4705 bool RetSmallStructInRegABI)
4706 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
4707 IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
4724 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
4725 bool RetSmallStructInRegABI)
4727 CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
4729 static bool isStructReturnInRegABI(const llvm::Triple &Triple,
4738 llvm::Value *Address) const override;
4745 Ty = CTy->getElementType();
4753 const Type *AlignTy =
nullptr;
4756 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
4771 (Size = getContext().getTypeSize(RetTy)) <= 64) {
4786 llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
4798 if (getTarget().getTriple().isOSDarwin()) {
4799 auto TI = getContext().getTypeInfoInChars(Ty);
4800 TI.
Align = getParamTypeAlignment(Ty);
4808 const unsigned OverflowLimit = 8;
4823 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4825 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4835 if (isInt || IsSoftFloatABI) {
4836 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
4838 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
4841 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4844 if (isI64 || (isF64 && IsSoftFloatABI)) {
4845 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4846 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4850 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4856 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4858 llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
4859 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4866 Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
4867 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
4872 if (!(isInt || IsSoftFloatABI)) {
4873 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4880 llvm::Value *RegOffset =
4881 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4885 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4889 Builder.CreateAdd(NumRegs,
4890 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4891 Builder.CreateStore(NumRegs, NumRegsAddr);
4901 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4914 Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
4916 Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
4920 if (Align > OverflowAreaAlign) {
4921 llvm::Value *Ptr = OverflowArea.getPointer();
4926 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4929 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4930 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4942 Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy,
4943 getContext().getTypeAlignInChars(Ty));
4949 bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
4951 assert(Triple.isPPC32());
4953 switch (Opts.getStructReturnConvention()) {
4962 if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
4987 static const unsigned GPRBits = 64;
4989 bool IsSoftFloatABI;
4996 bool isPromotableTypeForABI(QualType Ty) const;
5002 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5003 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5004 uint64_t Members) const override;
5022 if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
5037 bool asReturnValue) const override {
5041 bool isSwiftErrorInRegister() const override {
5050 PPC64_SVR4_ABIInfo::ABIKind Kind,
5053 std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {}
5061 llvm::Value *Address) const override;
5064 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
5066 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
5074 llvm::Value *Address) const override;
5082 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
5085 Ty = EnumTy->getDecl()->getIntegerType();
5088 if (isPromotableIntegerTypeForABI(Ty))
5095 case BuiltinType::Int:
5096 case BuiltinType::UInt:
5103 if (EIT->getNumBits() < 64)
5114 Ty = CTy->getElementType();
5116 auto FloatUsesVector = [this](QualType Ty){
5118 Ty) == &llvm::APFloat::IEEEquad();
5125 } else if (FloatUsesVector(Ty)) {
5134 const Type *AlignAsType = nullptr;
5138 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
5140 AlignAsType = EltType;
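// ELFv2 homogeneous aggregates: a struct or array made up solely of floats,
// doubles, or vectors can be passed in up to eight FPRs/VRs; the helpers
// below count the members and enforce the per-register limit.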
5145 uint64_t Members = 0;
5146 if (!AlignAsType && Kind == ELFv2 &&
5153 FloatUsesVector(QualType(AlignAsType, 0));
5170 uint64_t &Members) const {
5172 uint64_t NElements = AT->getSize().getZExtValue();
5177 Members *= NElements;
5187 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
5188 if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
5191 for (const auto &I : CXXRD->bases()) {
5196 uint64_t FldMembers;
5200 Members += FldMembers;
5204 for (const auto *FD : RD->fields()) {
5209 if (AT->getSize().getZExtValue() == 0)
5211 FT = AT->getElementType();
5221 uint64_t FldMembers;
5226 std::max(Members, FldMembers) : Members + FldMembers);
5240 Ty = CT->getElementType();
5256 QualType EltTy = VT->getElementType();
5257 unsigned NumElements =
5272 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5277 BT->getKind() == BuiltinType::Double ||
5278 BT->getKind() == BuiltinType::LongDouble ||
5279 BT->getKind() == BuiltinType::Ibm128 ||
5280 (getContext().getTargetInfo().hasFloat128Type() &&
5281 (BT->getKind() == BuiltinType::Float128))) {
5288 if (getContext().getTypeSize(VT) == 128)
5294 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
5295 const Type *Base, uint64_t Members) const {
5299 ((getContext().getTargetInfo().hasFloat128Type() &&
5300 Base->isFloat128Type()) ||
5301 Base->isVectorType()) ? 1
5302 : (getContext().getTypeSize(Base) + 63) / 64;
5305 return Members * NumRegs <= 8;
5318 uint64_t Size = getContext().getTypeSize(Ty);
5320 return getNaturalAlignIndirect(Ty, false);
5321 else if (Size < 128) {
5322 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5328 if (EIT->getNumBits() > 128)
5329 return getNaturalAlignIndirect(Ty, true);
5335 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
5336 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
5340 uint64_t Members = 0;
5341 if (Kind == ELFv2 &&
5342 isHomogeneousAggregate(Ty, Base, Members)) {
5343 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5344 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5352 uint64_t Bits = getContext().getTypeSize(Ty);
5353 if (Bits > 0 && Bits <= 8 * GPRBits) {
5354 llvm::Type *CoerceTy;
5358 if (Bits <= GPRBits)
5360 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5364 uint64_t RegBits = ABIAlign * 8;
5365 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
5366 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
5367 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
5376 TyAlign > ABIAlign);
5394 uint64_t
Size = getContext().getTypeSize(RetTy);
5396 return getNaturalAlignIndirect(RetTy);
5397 else if (Size < 128) {
5398 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5404 if (EIT->getNumBits() > 128)
5405 return getNaturalAlignIndirect(RetTy, false);
5410 uint64_t Members = 0;
5411 if (Kind == ELFv2 &&
5412 isHomogeneousAggregate(RetTy, Base, Members)) {
5413 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5414 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5419 uint64_t Bits = getContext().getTypeSize(RetTy);
5420 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
5424 llvm::Type *CoerceTy;
5425 if (Bits > GPRBits) {
5426 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
5427 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
5430 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5435 return getNaturalAlignIndirect(RetTy);
5445 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
5458 if (EltSize < SlotSize)
5468 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
5504 ABIKind getABIKind() const { return Kind; }
5505 bool isDarwinPCS() const { return Kind == DarwinPCS; }
5509 unsigned CallingConvention) const;
5511 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5512 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5513 uint64_t Members) const override;
5515 bool isIllegalVectorType(QualType Ty) const;
5536 if (isa<llvm::ScalableVectorType>(BaseTy))
5537 llvm::report_fatal_error("Passing SVE types to variadic functions is "
5538 "currently not supported");
5540 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
5541 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
5542 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
5549 bool asReturnValue) const override {
5552 bool isSwiftErrorInRegister() const override {
5556 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
5557 unsigned elts) const override;
5559 bool allowBFloatArgsAndRet() const override {
5560 return getTarget().hasBFloat16Type();
5569 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
5570 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5577 bool doesReturnSlotInterfereWithArgs() const override { return false; }
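// AArch64 branch protection: the parsed -mbranch-protection / attribute value
// is translated into the "sign-return-address", "sign-return-address-key" and
// "branch-target-enforcement" IR function attributes consumed by the backend.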
5579 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5581 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5585 const auto *TA = FD->getAttr<TargetAttr>();
5590 if (Attr.BranchProtection.empty())
5597 assert(Error.empty());
5599 auto *Fn = cast<llvm::Function>(GV);
5600 static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
5601 Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
5604 Fn->addFnAttr("sign-return-address-key",
5610 Fn->addFnAttr("branch-target-enforcement",
5615 llvm::Type *Ty) const override {
5617 auto *ST = dyn_cast<llvm::StructType>(Ty);
5618 if (ST && ST->getNumElements() == 1) {
5619 auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
5620 if (AT && AT->getNumElements() == 8 &&
5621 AT->getElementType()->isIntegerTy(64))
5629 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
5631 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
5632 : AArch64TargetCodeGenInfo(CGT, K) {}
5634 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5637 void getDependentLibraryOption(llvm::StringRef Lib,
5639 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5642 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5644 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5648 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5650 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5651 if (GV->isDeclaration())
5653 addStackProbeTargetAttributes(D, GV, CGM);
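// Scalable vector types: SVE predicates lower to <vscale x 16 x i1>, and SVE
// data vectors lower to the scalable LLVM vector type matching their element
// type (for example, svint32_t becomes <vscale x 4 x i32>).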
5662 assert(VT->getElementType()->isBuiltinType() &&
"expected builtin type!");
5664 BuiltinType::UChar &&
5665 "unexpected builtin type for SVE predicate!");
5667 llvm::Type::getInt1Ty(getVMContext()), 16));
5671 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
5674 llvm::ScalableVectorType *ResType = nullptr;
5677 llvm_unreachable("unexpected builtin type for SVE vector!");
5678 case BuiltinType::SChar:
5679 case BuiltinType::UChar:
5680 ResType = llvm::ScalableVectorType::get(
5681 llvm::Type::getInt8Ty(getVMContext()), 16);
5683 case BuiltinType::Short:
5684 case BuiltinType::UShort:
5685 ResType = llvm::ScalableVectorType::get(
5686 llvm::Type::getInt16Ty(getVMContext()), 8);
5688 case BuiltinType::Int:
5689 case BuiltinType::UInt:
5690 ResType = llvm::ScalableVectorType::get(
5691 llvm::Type::getInt32Ty(getVMContext()), 4);
5693 case BuiltinType::Long:
5694 case BuiltinType::ULong:
5695 ResType = llvm::ScalableVectorType::get(
5696 llvm::Type::getInt64Ty(getVMContext()), 2);
5698 case BuiltinType::Half:
5699 ResType = llvm::ScalableVectorType::get(
5700 llvm::Type::getHalfTy(getVMContext()), 8);
5703 ResType = llvm::ScalableVectorType::get(
5704 llvm::Type::getFloatTy(getVMContext()), 4);
5706 case BuiltinType::Double:
5707 ResType = llvm::ScalableVectorType::get(
5708 llvm::Type::getDoubleTy(getVMContext()), 2);
5710 case BuiltinType::BFloat16:
5711 ResType = llvm::ScalableVectorType::get(
5712 llvm::Type::getBFloatTy(getVMContext()), 8);
5718 uint64_t
Size = getContext().getTypeSize(Ty);