26#include "llvm/ADT/SmallBitVector.h"
27#include "llvm/ADT/StringExtras.h"
28#include "llvm/ADT/StringSwitch.h"
29#include "llvm/ADT/Twine.h"
30#include "llvm/IR/DataLayout.h"
31#include "llvm/IR/IntrinsicsNVPTX.h"
32#include "llvm/IR/IntrinsicsS390.h"
33#include "llvm/IR/Type.h"
34#include "llvm/Support/MathExtras.h"
35#include "llvm/Support/raw_ostream.h"
36#include "llvm/TargetParser/Triple.h"
40using namespace CodeGen;
                                 llvm::LLVMContext &LLVMContext) {
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;

  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);

  return !CodeGenFunction::hasScalarEvaluationKind(T) ||

                          llvm::Type *Padding) const {

                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
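// Note: integer-like scalars are counted in pointer-width units (an i128
// with 64-bit pointers costs two "registers"), while every float or vector
// scalar costs one. The Swift ABI lowering uses this to decide whether a
// value must be passed indirectly: e.g. on x86-64 a lowering of
// { i64, i64, double } counts 2 + 1 = 3 units against maxAllRegisters.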
                                     bool AsReturnValue) const {

                                   unsigned NumElts) const {

  return CXXABI.getRecordArgABI(RD);

  if (!isa<CXXRecordDecl>(RT->getDecl()) &&
      !RT->getDecl()->canPassInRegisters()) {

  return CXXABI.classifyReturnType(FI);

  if (UD->hasAttr<TransparentUnionAttr>()) {
    assert(!UD->field_empty() && "sema created an empty transparent union");

                                        uint64_t Members) const {

  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";

    OS << "Direct Type=";

    OS << "CoerceAndExpand Type=";

  llvm::Value *RoundUp = CGF.Builder.CreateConstInBoundsGEP1_32(
  return CGF.Builder.CreateIntrinsic(
      nullptr, Ptr->getName() + ".aligned");
                                      llvm::Type *DirectTy,
                                      bool AllowHigherAlign,
                                      bool ForceRightAdjust = false) {

  if (AllowHigherAlign && DirectAlign > SlotSize) {

      (!DirectTy->isStructTy() || ForceRightAdjust)) {

                                  bool AllowHigherAlign,
                                  bool ForceRightAdjust = false) {

    DirectSize = ValueInfo.Width;
    DirectAlign = ValueInfo.Align;

    DirectTy = DirectTy->getPointerTo(0);

                                        DirectAlign, SlotSizeAndAlign,
                                        AllowHigherAlign, ForceRightAdjust);

                                            SlotSize, SlotSize, true);

                                                  2 * SlotSize - EltSize);

                                    Address Addr1, llvm::BasicBlock *Block1,
                                    Address Addr2, llvm::BasicBlock *Block2,
                                    const llvm::Twine &Name = "") {

  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);

    : Info(std::move(Info)) {}

  return llvm::CallingConv::SPIR_KERNEL;

                                                     llvm::PointerType *T,
                                                     QualType QT) const {
  return llvm::ConstantPointerNull::get(T);

         "Address space agnostic languages only");

    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {

  if (auto *C = dyn_cast<llvm::Constant>(Src))

      Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");

                                                   llvm::Type *DestTy) const {

  return llvm::ConstantExpr::getPointerCast(Src, DestTy);

                                      llvm::AtomicOrdering Ordering,
                                      llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID("");

  bool WasArray = false;

    if (AT->getSize() == 0)
    FT = AT->getElementType();

  if (isa<CXXRecordDecl>(RT->getDecl()) &&
      (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))

  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())

  for (const auto *I : RD->fields())

  const Type *Found = nullptr;

  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {

  for (const auto *FD : RD->fields()) {

    if (AT->getSize().getZExtValue() != 1)
    FT = AT->getElementType();

         "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");

         "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);

    return Address(Addr, ElementTy, TyAlignForABI);

         "Unexpected ArgInfo Kind in generic VAArg emitter!");

         "Unexpected InReg seen in arginfo in generic VAArg emitter!");

         "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");

         "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");

         "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
class DefaultABIInfo : public ABIInfo {

    return getNaturalAlignIndirect(Ty);

    Ty = EnumTy->getDecl()->getIntegerType();

    if (EIT->getNumBits() >
      return getNaturalAlignIndirect(Ty);

    return getNaturalAlignIndirect(RetTy);

    RetTy = EnumTy->getDecl()->getIntegerType();

    if (EIT->getNumBits() >
        getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
                                     ? getContext().Int128Ty
                                     : getContext().LongLongTy))
      return getNaturalAlignIndirect(RetTy);

class WebAssemblyABIInfo final : public ABIInfo {

  DefaultABIInfo defaultInfo;

                                  WebAssemblyABIInfo::ABIKind K)
        std::make_unique<SwiftABIInfo>(CGT, false);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B(GV->getContext());
        B.addAttribute("wasm-import-module", Attr->getImportModule());
      if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B(GV->getContext());
        B.addAttribute("wasm-import-name", Attr->getImportName());
      if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B(GV->getContext());
        B.addAttribute("wasm-export-name", Attr->getExportName());

    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
        Fn->addFnAttr("no-prototype");

  virtual llvm::Type *getWasmExternrefReferenceType() const override {
    return llvm::Type::getWasm_ExternrefTy(getABIInfo().getVMContext());

  virtual llvm::Type *getWasmFuncrefReferenceType() const override {
    return llvm::Type::getWasm_FuncrefTy(getABIInfo().getVMContext());

    if (Kind == ABIKind::ExperimentalMV) {
      bool HasBitField = false;
        if (Field->isBitField()) {

  return defaultInfo.classifyArgumentType(Ty);

    if (Kind == ABIKind::ExperimentalMV)

  return defaultInfo.classifyReturnType(RetTy);

                          getContext().getTypeInfoInChars(Ty),

class PNaClABIInfo : public ABIInfo {

    return getNaturalAlignIndirect(Ty);

    Ty = EnumTy->getDecl()->getIntegerType();

    if (EIT->getNumBits() > 64)
      return getNaturalAlignIndirect(Ty);

    return getNaturalAlignIndirect(RetTy);

  if (EIT->getNumBits() > 64)
    return getNaturalAlignIndirect(RetTy);

    RetTy = EnumTy->getDecl()->getIntegerType();

bool IsX86_MMXType(llvm::Type *IRType) {
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;
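// i.e. 64-bit integer vectors whose elements are narrower than 64 bits:
// <2 x i32>, <4 x i16>, and <8 x i8> all qualify as MMX types, while
// <1 x i64> does not.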
                                          StringRef Constraint,
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                       .Cases("y", "&y", "^Ym", true)
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=

  if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
    if (BT->getKind() == BuiltinType::LongDouble) {
          &llvm::APFloat::x87DoubleExtended())

  if (VecSize == 128 || VecSize == 256 || VecSize == 512)

static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
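// The Microsoft vectorcall convention caps a homogeneous vector aggregate
// at four elements; anything larger is not treated as an HVA.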
static ABIArgInfo getDirectX86Hva(llvm::Type *T = nullptr) {

      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}

  llvm::SmallBitVector IsPreassigned;
  unsigned CC = CallingConv::CC_C;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;

class X86_32ABIInfo : public ABIInfo {

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;

  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);

  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
                   CGT.getTarget().getTriple().isOSCygMing()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}

                            bool AsReturnValue) const override {

                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {
    SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);

  static bool isStructReturnInRegABI(

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

                               llvm::Value *Address) const override;

                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);

                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

    unsigned Sig = (0xeb << 0) |

    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
                                              unsigned NumNewOuts,
                                              std::string &AsmString) {
  llvm::raw_string_ostream OS(Buf);

  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);

    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;

  AsmString = std::move(OS.str());
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {

  if (!Constraints.empty())

  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);

  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  ResultRegDests.push_back(ReturnSlot);

bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,

  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))

  if (Size == 64 || Size == 128)

    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  if (!RT)
    return false;

    if (!shouldReturnTypeInRegister(FD->getType(), Context))

    Ty = CTy->getElementType();

  return Size == 32 || Size == 64;

  for (const auto *FD : RD->fields()) {

    if (FD->isBitField())

bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {

  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      if (!CXXRD->isCLike())

    if (CXXRD->isDynamicClass())

  return Size == getContext().getTypeSize(Ty);

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  if (State.FreeRegs) {
      return getNaturalAlignIndirectInReg(RetTy);
  return getNaturalAlignIndirect(RetTy, false);

                                             CCState &State) const {

  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {

    if (IsDarwinVectorABI) {
            llvm::Type::getInt64Ty(getVMContext()), 2));

      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))

      return getIndirectReturnResult(RetTy, State);

    return getIndirectReturnResult(RetTy, State);

    return getIndirectReturnResult(RetTy, State);

          llvm::Type::getHalfTy(getVMContext()), 2));

    if (shouldReturnTypeInRegister(RetTy, getContext())) {

      if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
          || SeltTy->hasPointerRepresentation())

    return getIndirectReturnResult(RetTy, State);

    RetTy = EnumTy->getDecl()->getIntegerType();

    if (EIT->getNumBits() > 64)
      return getIndirectReturnResult(RetTy, State);

  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())

  for (const auto *i : RD->fields()) {

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  if (Align <= MinABIStackAlignInBytes)

  if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))

  if (!IsDarwinVectorABI) {
    return MinABIStackAlignInBytes;

  return MinABIStackAlignInBytes;

                                            CCState &State) const {
    if (State.FreeRegs) {
        return getNaturalAlignIndirectInReg(Ty);
    return getNaturalAlignIndirect(Ty, false);

  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)

  bool Realign = TypeAlign > StackAlign;

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
    if (K == BuiltinType::Float || K == BuiltinType::Double)

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)

    if (SizeInRegs > State.FreeRegs) {

  if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)

  State.FreeRegs -= SizeInRegs;
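// Register budgeting here is done in 32-bit units: a type of Size bits
// consumes (Size + 31) / 32 GPRs, so e.g. an 8-byte struct under regparm(3)
// takes two of the three available registers.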
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {

  NeedsPadding = false;

  if (!updateFreeRegs(Ty, State))

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&

  if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
                      State.CC == llvm::CallingConv::X86_VectorCall))

  if (!updateFreeRegs(Ty, State))

  if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)

void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI,
                                           CCState &State) const {

  for (int I = 0, E = Args.size(); I < E; ++I) {
        isHomogeneousAggregate(Ty, Base, NumElts)) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        State.IsPreassigned.set(I);

                                               CCState &State) const {
  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;

  TypeInfo TI = getContext().getTypeInfo(Ty);

      return getIndirectResult(Ty, false, State);

  if ((IsRegCall || IsVectorCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;

        return getDirectX86Hva();

      return getIndirectResult(Ty, false, State);

      return getIndirectResult(Ty, true, State);

    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;

    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (TI.Width + 31) / 32;
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);

    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

      return getIndirectResult(Ty, false, State);

    if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
        canExpandIndirectArgument(Ty))
                     IsFastCall || IsVectorCall || IsRegCall, PaddingType);

    return getIndirectResult(Ty, true, State);

  if (IsWin32StructABI) {
    if (TI.Width <= 512 && State.FreeSSERegs > 0) {
      --State.FreeSSERegs;
    return getIndirectResult(Ty, false, State);

  if (IsDarwinVectorABI) {
        (TI.Width == 64 && VT->getNumElements() == 1))
          llvm::IntegerType::get(getVMContext(), TI.Width));

  if (IsX86_MMXType(CGT.ConvertType(Ty)))

    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (isPromotableIntegerTypeForABI(Ty)) {

    if (EIT->getNumBits() <= 64) {

  return getIndirectResult(Ty, false, State);

  else if (State.CC == llvm::CallingConv::X86_FastCall) {
    State.FreeSSERegs = 3;
  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeSSERegs = 6;
  } else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeSSERegs = 8;
  } else if (IsWin32StructABI) {
    State.FreeRegs = DefaultNumRegisterParameters;
    State.FreeSSERegs = 3;
    State.FreeRegs = DefaultNumRegisterParameters;

  if (State.FreeRegs) {

  if (State.CC == llvm::CallingConv::X86_VectorCall)
    runVectorCallFirstPass(FI, State);

  bool UsedInAlloca = false;
  for (int I = 0, E = Args.size(); I < E; ++I) {
    if (State.IsPreassigned.test(I))

    rewriteWithInAlloca(FI);

  assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");

  bool IsIndirect = false;

  llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
    LLTy = LLTy->getPointerTo(0);
  FrameFields.push_back(LLTy);
  StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);

  StackOffset = FieldEnd.alignTo(WordSize);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);

  llvm_unreachable("invalid enum");

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

  if (Ret.isIndirect() && !Ret.getInReg()) {
    addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
    Ret.setInAllocaSRet(IsWin32StructABI);

  for (; I != E; ++I) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

      getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));

bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Win32:

  if (!FD->hasAttr<AnyX86InterruptAttr>())

  llvm::Function *Fn = cast<llvm::Function>(GV);
  Fn->setCallingConv(llvm::CallingConv::X86_INTR);

  llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
      Fn->getContext(), ByValTy);
  Fn->addParamAttr(0, NewAttr);

void X86_32TargetCodeGenInfo::setTargetAttributes(
  if (GV->isDeclaration())
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);

  Builder.CreateAlignedStore(
      Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),

  llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
enum class X86AVXABILevel {

static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
class X86_64ABIInfo : public ABIInfo {

  static Class merge(Class Accum, Class Field);

  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg, bool IsRegCall = false) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

                                  unsigned &neededInt, unsigned &neededSSE,
                                  bool IsRegCall = false) const;

                                       unsigned &NeededSSE,
                                       unsigned &MaxVectorWidth) const;

                                   unsigned &NeededSSE,
                                   unsigned &MaxVectorWidth) const;

  bool IsIllegalVectorType(QualType Ty) const;

  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();

  bool classifyIntegerMMXAsSSE() const {
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver3_8)

    const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())

  bool passInt128VectorsInMem() const {
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver9)

    const llvm::Triple &T = getTarget().getTriple();
    return T.isOSLinux() || T.isOSNetBSD();

  X86AVXABILevel AVXLevel;

  bool Has64BitPointers;

      : ABIInfo(CGT), AVXLevel(AVXLevel),
        Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}

    unsigned neededInt, neededSSE;

    if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
      return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;

  bool has64BitPointers() const {
    return Has64BitPointers;

class WinX86_64ABIInfo : public ABIInfo {
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);

                      bool IsVectorCall, bool IsRegCall) const;

  X86AVXABILevel AVXLevel;

        std::make_unique<SwiftABIInfo>(CGT, true);

  const X86_64ABIInfo &getABIInfo() const {

  bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }

                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);

  bool isNoProtoCallVariadic(const CallArgList &args,

    bool HasAVXType = false;
    for (CallArgList::const_iterator
           it = args.begin(), ie = args.end(); it != ie; ++it) {
      if (getABIInfo().isPassedUsingAVXType(it->Ty)) {

    unsigned Sig = (0xeb << 0) |

    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
    if (GV->isDeclaration())
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->addFnAttr("stackrealign");

static void initFeatureMaps(const ASTContext &Ctx,
                            llvm::StringMap<bool> &CallerMap,
                            llvm::StringMap<bool> &CalleeMap,
  if (CalleeMap.empty() && CallerMap.empty()) {

                                 const llvm::StringMap<bool> &CallerMap,
                                 const llvm::StringMap<bool> &CalleeMap,
  bool CallerHasFeat = CallerMap.lookup(Feature);
  bool CalleeHasFeat = CalleeMap.lookup(Feature);
  if (!CallerHasFeat && !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
           << IsArgument << Ty << Feature;

  if (!CallerHasFeat || !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::err_avx_calling_convention)
           << IsArgument << Ty << Feature;

                          const llvm::StringMap<bool> &CallerMap,
                          const llvm::StringMap<bool> &CalleeMap, QualType Ty,
    return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
                                "avx512f", IsArgument);

  return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",

void X86_64TargetCodeGenInfo::checkFunctionCallABI(
  llvm::StringMap<bool> CallerMap;
  llvm::StringMap<bool> CalleeMap;
  unsigned ArgIndex = 0;

  for (const CallArg &Arg : Args) {
    if (Arg.getType()->isVectorType() &&
      initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);

      if (ArgIndex < Callee->getNumParams())
        Ty = Callee->getParamDecl(ArgIndex)->getType();

                        CalleeMap, Ty, true))

  if (Callee->getReturnType()->isVectorType() &&
    initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
                    CalleeMap, Callee->getReturnType(),

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  bool Quote = Lib.contains(' ');
  std::string ArgStr = Quote ? "\"" : "";

  if (!Lib.endswith_insensitive(".lib") && !Lib.endswith_insensitive(".a"))

  ArgStr += Quote ? "\"" : "";

class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
      bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
      unsigned NumRegisterParameters)
      : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
                                Win32StructABI, NumRegisterParameters, false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
  if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
      Fn->addFnAttr("stack-probe-size",
      Fn->addFnAttr("no-stack-arg-probe");

void WinX86_32TargetCodeGenInfo::setTargetAttributes(
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
  addStackProbeTargetAttributes(D, GV, CGM);

                             X86AVXABILevel AVXLevel)
        std::make_unique<SwiftABIInfo>(CGT, true);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

void WinX86_64TargetCodeGenInfo::setTargetAttributes(
  if (GV->isDeclaration())
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");

  addStackProbeTargetAttributes(D, GV, CGM);

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())

  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))

  if (Hi == SSEUp && Lo != SSE)
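// These are the AMD64-ABI 3.2.3p5 post-merger cleanups: an X87UP half with
// no matching X87 half (on ABIs honoring revision 0.98), and aggregates
// larger than two eightbytes that are not a single SSE/SSEUP run, both
// degrade to MEMORY; a lone SSEUP with no preceding SSE is converted to SSE.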
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
  if (Field == Memory)
  if (Accum == NoClass)
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
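// The merge follows AMD64-ABI 3.2.3p2 rule 4: equal classes and NoClass are
// absorbed, MEMORY wins over everything, INTEGER wins over SSE, and any
// X87-family class in a pair forces MEMORY; what survives is SSE. For
// example, struct { int a; float b; } merges INTEGER with SSE within the
// low eightbyte, so the whole struct is passed in a GPR.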
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
                             Class &Hi, bool isNamedArg, bool IsRegCall) const {

  Class &Current = OffsetBase < 64 ? Lo : Hi;

    if (k == BuiltinType::Void) {
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
    } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
               k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad()) {
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
        llvm_unreachable("unexpected long double representation!");

    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);

    if (Has64BitPointers) {

      uint64_t EB_FuncPtr = (OffsetBase) / 64;
      uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
      if (EB_FuncPtr != EB_ThisAdj) {

    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {

      uint64_t EB_Lo = (OffsetBase) / 64;

    } else if (Size == 64) {
      QualType ElementType = VT->getElementType();

      if (!classifyIntegerMMXAsSSE() &&

      if (OffsetBase && OffsetBase != 64)

    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      QualType ElementType = VT->getElementType();

      if (passInt128VectorsInMem() && Size != 128 &&

      else if (Size <= 128)

    } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||
    } else if (ET == getContext().DoubleTy) {
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad())
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble())
        llvm_unreachable("unexpected long double representation!");

    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)

    if (EITy->getNumBits() <= 64)
    else if (EITy->getNumBits() <= 128)

    if (!IsRegCall && Size > 512)

    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))

    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))

    for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
         ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");

    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");

        Class FieldLo, FieldHi;
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
    bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
                                LangOptions::ClangABI::Ver11 ||
                            getContext().getTargetInfo().getTriple().isPS();
    bool IsUnion = RT->isUnionType() && !UseClang11Compat;

         i != e; ++i, ++idx) {
      bool BitField = i->isBitField();

      if (BitField && i->isUnnamedBitfield())

          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        postMerge(Size, Lo, Hi);

      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        postMerge(Size, Lo, Hi);

      Class FieldLo, FieldHi;

        assert(!i->isUnnamedBitfield());

        assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");

        FieldHi = EB_Hi ? Integer : NoClass;

        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)

    postMerge(Size, Lo, Hi);
      Ty = EnumTy->getDecl()->getIntegerType();

    return getNaturalAlignIndirect(Ty);

  return getNaturalAlignIndirect(Ty);

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)

    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&

                                            unsigned freeIntRegs) const {

    Ty = EnumTy->getDecl()->getIntegerType();

  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  if (freeIntRegs == 0) {

    if (Align == 8 && Size <= 64)

llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType)) {

    if (passInt128VectorsInMem() &&
        cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {

      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),

  if (IRType->getTypeID() == llvm::Type::FP128TyID)

  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),

  if (TySize <= StartBit)

    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned EltOffset = i * EltSize;
      if (EltOffset >= EndBit)
        break;

      unsigned EltStart = EltOffset < StartBit ? StartBit - EltOffset : 0;
                               EndBit - EltOffset, Context))

    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");

        if (BaseOffset >= EndBit)
          continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit - BaseOffset : 0;
                                 EndBit - BaseOffset, Context))

         i != e; ++i, ++idx) {

      if (FieldOffset >= EndBit)
        break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit - FieldOffset : 0;

                                  const llvm::DataLayout &TD) {
  if (IROffset == 0 && IRType->isFloatingPointTy())

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    if (!STy->getNumContainedTypes())

    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset / EltSize * EltSize;

llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  const llvm::DataLayout &TD = getDataLayout();
  unsigned SourceSize =
      (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
  if (!T0 || T0->isDoubleTy())
    return llvm::Type::getDoubleTy(getVMContext());

  llvm::Type *T1 = nullptr;
  unsigned T0Size = TD.getTypeAllocSize(T0);
  if (SourceSize > T0Size)

  if (T1 == nullptr) {

    if (T0->is16bitFPTy() && SourceSize > 4)

  if (T0->isFloatTy() && T1->isFloatTy())
    return llvm::FixedVectorType::get(T0, 2);

  if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
    llvm::Type *T2 = nullptr;
      return llvm::FixedVectorType::get(T0, 2);
    return llvm::FixedVectorType::get(T0, 4);

  if (T0->is16bitFPTy() || T1->is16bitFPTy())
    return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);

  return llvm::Type::getDoubleTy(getVMContext());
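// The result covers the eightbyte at IROffset with the smallest SSE type
// holding all the user data: e.g. struct { float a; float b; } yields
// <2 x float>, two adjacent halves yield <2 x half>, and anything denser
// falls back to double.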
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  if (IROffset == 0) {
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))

    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

                                SourceOffset * 8 + 64, getContext()))

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {

    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset / EltSize * EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset - EltOffset, SourceTy,

  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes - SourceOffset, 8U) * 8);
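// The fallback integer is sized to the remaining bytes of the source type,
// capped at one eightbyte: e.g. a 12-byte struct of three ints examined at
// SourceOffset 8 produces an i32, so the value is passed as { i64, i32 }.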
                                           const llvm::DataLayout &TD) {
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  llvm::Align HiAlign = TD.getABITypeAlign(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

    if (Lo->isHalfTy() || Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());

      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
classifyReturnType(QualType RetTy) const {

  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, true);

  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;

    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");

    llvm_unreachable("Invalid classification for lo word.");

    return getIndirectReturnResult(RetTy);

    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {

        RetTy = EnumTy->getDecl()->getIntegerType();

          isPromotableIntegerTypeForABI(RetTy))

    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    ResType = llvm::Type::getX86_FP80Ty(getVMContext());

    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));

  llvm::Type *HighPart = nullptr;

    llvm_unreachable("Invalid classification for hi word.");

    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);

    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                    unsigned &neededInt, unsigned &neededSSE,
                                    bool isNamedArg, bool IsRegCall) const {

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);

  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;

    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");

    return getIndirectResult(Ty, freeIntRegs);

    llvm_unreachable("Invalid classification for lo word.");

    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {

        Ty = EnumTy->getDecl()->getIntegerType();

          isPromotableIntegerTypeForABI(Ty))

    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);

  llvm::Type *HighPart = nullptr;

    llvm_unreachable("Invalid classification for hi word.");

  case NoClass:
    break;

    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);

X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE,
                                             unsigned &MaxVectorWidth) const {
  assert(RT && "classifyRegCallStructType only valid with struct types");

    return getIndirectReturnResult(Ty);

  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);

      if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);

      unsigned LocalNeededInt, LocalNeededSSE;
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);

      if (const auto *AT = getContext().getAsConstantArrayType(MTy))
        MTy = AT->getElementType();

        if (getContext().getTypeSize(VT) > MaxVectorWidth)
          MaxVectorWidth = getContext().getTypeSize(VT);

      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;

X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                         unsigned &NeededSSE,
                                         unsigned &MaxVectorWidth) const {

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,

  WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
  Win64ABIInfo.computeInfo(FI);

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;

    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;

                 getContext().LongDoubleTy)

  else if (NeededSSE && MaxVectorWidth > 0)

       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,

                                     NeededSSE, IsNamedArg);

    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;

      it->info = getIndirectResult(it->type, FreeIntRegs);

  llvm::Value *overflow_arg_area =

      CGF.Builder.CreateBitCast(overflow_arg_area,
                                llvm::PointerType::getUnqual(LTy));

      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
                                            Offset, "overflow_arg_area.next");

  return Address(Res, LTy, Align);

  unsigned neededInt, neededSSE;

  if (!neededInt && !neededSSE)

  llvm::Value *InRegs = nullptr;
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;

    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");

    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  if (neededInt && neededSSE) {
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr =
    llvm::Value *FPAddr =
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;

        TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),

        TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),

  } else if (neededInt) {

    auto TInfo = getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = TInfo.Width.getQuantity();

  } else if (neededSSE == 1) {

    assert(neededSSE == 2 && "Invalid number of needed registers!");

        RegAddrLo, ST->getStructElementType(0)));

        RegAddrHi, ST->getStructElementType(1)));

  llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);

  llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
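// The magic numbers mirror the SysV register save area layout: gp_offset
// may not exceed 48 (6 GPRs x 8 bytes) and fp_offset may not exceed 176
// (48 + 8 XMM registers x 16 bytes); past those bounds the argument is
// fetched from the overflow area instead.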
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(

      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();

                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {

    Ty = EnumTy->getDecl()->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);

    if (!IsReturnType) {

      return getNaturalAlignIndirect(Ty, false);

  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {

      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;

    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
        FreeSSERegs -= NumElts;

    } else if (IsReturnType) {

    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())

    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, false);

    switch (BT->getKind()) {
    case BuiltinType::Bool:

    case BuiltinType::LongDouble:

      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::x87DoubleExtended())

    case BuiltinType::Int128:
    case BuiltinType::UInt128:

          llvm::Type::getInt64Ty(getVMContext()), 2));

  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);

  unsigned FreeSSERegs = 0;

  } else if (IsRegCall) {

                            IsVectorCall, IsRegCall);

  } else if (IsRegCall) {

  unsigned ArgNum = 0;
  unsigned ZeroSSERegs = 0;

    unsigned *MaybeFreeSSERegs =
        (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
        classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);

      I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);

  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

                                llvm::Value *Address, bool Is64Bit,

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

class AIXABIInfo : public ABIInfo {

  const unsigned PtrByteSize;

      : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}

  bool isPromotableTypeForABI(QualType Ty) const;

                               llvm::Value *Address) const override;

bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
    Ty = EnumTy->getDecl()->getIntegerType();

  if (getContext().isPromotableIntegerType(Ty))

    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:

    return getNaturalAlignIndirect(RetTy);

  CharUnits CCAlign = getParamTypeAlignment(Ty);
  CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);

    Ty = CTy->getElementType();

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  if (EltSize < SlotSize)

bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(

class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
  bool IsSoftFloatABI;
  bool IsRetSmallStructInRegABI;

                      bool RetSmallStructInRegABI)
      : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}

  PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
                         bool RetSmallStructInRegABI)
            CGT, SoftFloatABI, RetSmallStructInRegABI)) {}

  static bool isStructReturnInRegABI(const llvm::Triple &Triple,

                               llvm::Value *Address) const override;

    Ty = CTy->getElementType();

  const Type *AlignTy = nullptr;

  if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||

      (Size = getContext().getTypeSize(RetTy)) <= 64) {

    llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);

  return DefaultABIInfo::classifyReturnType(RetTy);

  if (getTarget().getTriple().isOSDarwin()) {
    auto TI = getContext().getTypeInfoInChars(Ty);
    TI.Align = getParamTypeAlignment(Ty);

  const unsigned OverflowLimit = 8;

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;

  bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;

  if (isInt || IsSoftFloatABI) {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
    NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");

  llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");

  if (isI64 || (isF64 && IsSoftFloatABI)) {
    NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));

      Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
  if (isIndirect)
    DirectTy = DirectTy->getPointerTo(0);

    Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
    RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,

    if (!(isInt || IsSoftFloatABI)) {
      RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,

    llvm::Value *RegOffset =
        Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));

    RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);

        Builder.CreateAdd(NumRegs,
            Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
    Builder.CreateStore(NumRegs, NumRegsAddr);

    Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);

    Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
        Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,

    if (Align > OverflowAreaAlign) {
      llvm::Value *Ptr = OverflowArea.getPointer();

    MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);

    OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
    Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);

                     getContext().getTypeAlignInChars(Ty));

bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
  assert(Triple.isPPC32());

  switch (Opts.getStructReturnConvention()) {

  if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())

class PPC64_SVR4_ABIInfo : public ABIInfo {

  static const unsigned GPRBits = 64;

  bool IsSoftFloatABI;

  bool isPromotableTypeForABI(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

    if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||

                           PPC64_SVR4_ABIInfo::ABIKind Kind,

            std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {
    SwiftInfo = std::make_unique<SwiftABIInfo>(CGT, false);

                               llvm::Value *Address) const override;

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

                               llvm::Value *Address) const override;

PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
    Ty = EnumTy->getDecl()->getIntegerType();

  if (isPromotableIntegerTypeForABI(Ty))

    case BuiltinType::Int:
    case BuiltinType::UInt:

    if (EIT->getNumBits() < 64)

    Ty = CTy->getElementType();

  auto FloatUsesVector = [this](QualType Ty) {
                                           Ty) == &llvm::APFloat::IEEEquad();

  } else if (FloatUsesVector(Ty)) {

  const Type *AlignAsType = nullptr;

  if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
    AlignAsType = EltType;

  if (!AlignAsType && Kind == ELFv2 &&

        FloatUsesVector(QualType(AlignAsType, 0));

                                       uint64_t &Members) const {
    uint64_t NElements = AT->getSize().getZExtValue();

    Members *= NElements;

    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))

      for (const auto &I : CXXRD->bases()) {

        uint64_t FldMembers;

        Members += FldMembers;

    for (const auto *FD : RD->fields()) {

        if (AT->getSize().getZExtValue() == 0)
        FT = AT->getElementType();

      uint64_t FldMembers;

                   std::max(Members, FldMembers) : Members + FldMembers);

      Ty = CT->getElementType();

    QualType EltTy = VT->getElementType();
    unsigned NumElements =

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble ||
        BT->getKind() == BuiltinType::Ibm128 ||
        (getContext().getTargetInfo().hasFloat128Type() &&
         (BT->getKind() == BuiltinType::Float128))) {

    if (getContext().getTypeSize(VT) == 128)

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
      ((getContext().getTargetInfo().hasFloat128Type() &&
        Base->isFloat128Type()) ||
       Base->isVectorType()) ? 1
                             : (getContext().getTypeSize(Base) + 63) / 64;

  return Members * NumRegs <= 8;
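// The ELFv2 parameter save area provides eight float/vector registers for
// a homogeneous aggregate, so e.g. an array of eight doubles still
// qualifies while nine do not; a member wider than one GPR (such as a
// 128-bit long double) counts once per 64-bit chunk.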
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {

      return getNaturalAlignIndirect(Ty, false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);

    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(Ty, true);

    uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();

    if (Kind == ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);

    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      if (Bits <= GPRBits)
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));

        uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);

                                   TyAlign > ABIAlign);

PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {

      return getNaturalAlignIndirect(RetTy);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);

    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(RetTy, false);

    if (Kind == ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));