10#include "TargetInfo.h"
21class AArch64ABIInfo :
public ABIInfo {
34 unsigned CallingConvention)
const;
38 uint64_t Members)
const override;
41 bool isIllegalVectorType(
QualType Ty)
const;
62 if (isa<llvm::ScalableVectorType>(BaseTy))
63 llvm::report_fatal_error(
"Passing SVE types to variadic functions is "
64 "currently not supported");
67 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
68 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
85 unsigned NumElts)
const override;
92 SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
96 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
107 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
111 const auto *TA = FD->
getAttr<TargetAttr>();
117 if (
Attr.BranchProtection.empty())
123 Attr.CPU, BPI, Error);
124 assert(
Error.empty());
126 auto *Fn = cast<llvm::Function>(GV);
127 static const char *SignReturnAddrStr[] = {
"none",
"non-leaf",
"all"};
128 Fn->addFnAttr(
"sign-return-address", SignReturnAddrStr[
static_cast<int>(BPI.
SignReturnAddr)]);
130 if (BPI.
SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
131 Fn->addFnAttr(
"sign-return-address-key",
132 BPI.
SignKey == LangOptions::SignReturnAddressKeyKind::AKey
137 Fn->addFnAttr(
"branch-target-enforcement",
142 llvm::Type *Ty)
const override {
144 auto *ST = dyn_cast<llvm::StructType>(Ty);
145 if (ST && ST->getNumElements() == 1) {
146 auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
147 if (AT && AT->getNumElements() == 8 &&
148 AT->getElementType()->isIntegerTy(64))
156class WindowsAArch64TargetCodeGenInfo :
public AArch64TargetCodeGenInfo {
159 : AArch64TargetCodeGenInfo(CGT, K) {}
161 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
164 void getDependentLibraryOption(llvm::StringRef Lib,
166 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
169 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef
Value,
171 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" +
Value.str() +
"\"";
175void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
177 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
178 if (GV->isDeclaration())
180 addStackProbeTargetAttributes(D, GV, CGM);
188 if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
189 assert(VT->getElementType()->isBuiltinType() &&
"expected builtin type!");
191 BuiltinType::UChar &&
192 "unexpected builtin type for SVE predicate!");
194 llvm::Type::getInt1Ty(getVMContext()), 16));
197 if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
198 assert(VT->getElementType()->isBuiltinType() &&
"expected builtin type!");
200 const auto *BT = VT->getElementType()->castAs<
BuiltinType>();
201 llvm::ScalableVectorType *ResType =
nullptr;
202 switch (BT->getKind()) {
204 llvm_unreachable(
"unexpected builtin type for SVE vector!");
205 case BuiltinType::SChar:
206 case BuiltinType::UChar:
207 ResType = llvm::ScalableVectorType::get(
208 llvm::Type::getInt8Ty(getVMContext()), 16);
210 case BuiltinType::Short:
211 case BuiltinType::UShort:
212 ResType = llvm::ScalableVectorType::get(
213 llvm::Type::getInt16Ty(getVMContext()), 8);
215 case BuiltinType::Int:
216 case BuiltinType::UInt:
217 ResType = llvm::ScalableVectorType::get(
218 llvm::Type::getInt32Ty(getVMContext()), 4);
220 case BuiltinType::Long:
221 case BuiltinType::ULong:
222 ResType = llvm::ScalableVectorType::get(
223 llvm::Type::getInt64Ty(getVMContext()), 2);
225 case BuiltinType::Half:
226 ResType = llvm::ScalableVectorType::get(
227 llvm::Type::getHalfTy(getVMContext()), 8);
229 case BuiltinType::Float:
230 ResType = llvm::ScalableVectorType::get(
231 llvm::Type::getFloatTy(getVMContext()), 4);
233 case BuiltinType::Double:
234 ResType = llvm::ScalableVectorType::get(
235 llvm::Type::getDoubleTy(getVMContext()), 2);
237 case BuiltinType::BFloat16:
238 ResType = llvm::ScalableVectorType::get(
239 llvm::Type::getBFloatTy(getVMContext()), 8);
247 if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
248 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
252 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
257 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
262 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
265 return getNaturalAlignIndirect(Ty,
false);
269AArch64ABIInfo::classifyArgumentType(
QualType Ty,
bool IsVariadic,
270 unsigned CallingConvention)
const {
274 if (isIllegalVectorType(Ty))
275 return coerceIllegalVector(Ty);
280 Ty = EnumTy->getDecl()->getIntegerType();
283 if (EIT->getNumBits() > 128)
284 return getNaturalAlignIndirect(Ty);
286 return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
294 return getNaturalAlignIndirect(Ty, RAA ==
302 if (IsEmpty || Size == 0) {
303 if (!getContext().getLangOpts().
CPlusPlus || isDarwinPCS())
308 if (IsEmpty && Size == 0)
316 bool IsWin64 =
Kind == AArch64ABIKind::Win64 ||
317 CallingConvention == llvm::CallingConv::Win64;
318 bool IsWinVariadic = IsWin64 && IsVariadic;
321 if (!IsWinVariadic && isHomogeneousAggregate(Ty,
Base, Members)) {
322 if (Kind != AArch64ABIKind::AAPCS)
324 llvm::ArrayType::get(CGT.ConvertType(
QualType(
Base, 0)), Members));
329 getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
330 Align = (Align >= 16) ? 16 : 8;
332 llvm::ArrayType::get(CGT.ConvertType(
QualType(
Base, 0)), Members), 0,
333 nullptr,
true, Align);
340 if (getTarget().isRenderScriptTarget()) {
344 if (Kind == AArch64ABIKind::AAPCS) {
345 Alignment = getContext().getTypeUnadjustedAlign(Ty);
346 Alignment = Alignment < 128 ? 64 : 128;
349 std::max(getContext().getTypeAlign(Ty),
350 (
unsigned)getTarget().getPointerWidth(LangAS::Default));
352 Size = llvm::alignTo(Size, Alignment);
356 llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
358 Size == Alignment ? BaseTy
359 : llvm::ArrayType::get(BaseTy, Size / Alignment));
362 return getNaturalAlignIndirect(Ty,
false);
366 bool IsVariadic)
const {
371 if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
372 VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
373 return coerceIllegalVector(RetTy);
377 if (RetTy->
isVectorType() && getContext().getTypeSize(RetTy) > 128)
378 return getNaturalAlignIndirect(RetTy);
383 RetTy = EnumTy->getDecl()->getIntegerType();
386 if (EIT->getNumBits() > 128)
387 return getNaturalAlignIndirect(RetTy);
389 return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
400 if (isHomogeneousAggregate(RetTy,
Base, Members) &&
401 !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
410 if (getTarget().isRenderScriptTarget()) {
414 if (Size <= 64 && getDataLayout().isLittleEndian()) {
422 llvm::IntegerType::get(getVMContext(), Size));
425 unsigned Alignment = getContext().getTypeAlign(RetTy);
426 Size = llvm::alignTo(Size, 64);
430 if (Alignment < 128 && Size == 128) {
431 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
437 return getNaturalAlignIndirect(RetTy);
441bool AArch64ABIInfo::isIllegalVectorType(
QualType Ty)
const {
446 if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
447 VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
451 unsigned NumElements = VT->getNumElements();
454 if (!llvm::isPowerOf2_32(NumElements))
459 llvm::Triple Triple = getTarget().getTriple();
460 if (Triple.getArch() == llvm::Triple::aarch64_32 &&
461 Triple.isOSBinFormatMachO())
464 return Size != 64 && (
Size != 128 || NumElements == 1);
469bool AArch64SwiftABIInfo::isLegalVectorType(
CharUnits VectorSize,
471 unsigned NumElts)
const {
472 if (!llvm::isPowerOf2_32(NumElts))
480bool AArch64ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
486 if (BT->isFloatingPoint())
489 unsigned VecSize = getContext().getTypeSize(VT);
490 if (VecSize == 64 || VecSize == 128)
496bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *
Base,
497 uint64_t Members)
const {
501bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
517 uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
528 BaseTy = llvm::PointerType::getUnqual(BaseTy);
532 unsigned NumRegs = 1;
533 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
534 BaseTy = ArrTy->getElementType();
535 NumRegs = ArrTy->getNumElements();
537 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
555 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
556 CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
559 llvm::Value *reg_offs =
nullptr;
561 int RegSize = IsIndirect ? 8 : TySize.
getQuantity();
567 RegSize = llvm::alignTo(RegSize, 8);
573 RegSize = 16 * NumRegs;
584 llvm::Value *UsingStack =
nullptr;
585 UsingStack = CGF.
Builder.CreateICmpSGE(
586 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, 0));
588 CGF.
Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
597 if (!IsFPR && !IsIndirect && TyAlign.
getQuantity() > 8) {
600 reg_offs = CGF.
Builder.CreateAdd(
601 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, Align - 1),
603 reg_offs = CGF.
Builder.CreateAnd(
604 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, -Align),
612 llvm::Value *NewOffset =
nullptr;
613 NewOffset = CGF.
Builder.CreateAdd(
614 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, RegSize),
"new_reg_offs");
619 llvm::Value *InRegs =
nullptr;
620 InRegs = CGF.
Builder.CreateICmpSLE(
621 NewOffset, llvm::ConstantInt::get(CGF.
Int32Ty, 0),
"inreg");
623 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
633 llvm::Value *reg_top =
nullptr;
645 MemTy = llvm::PointerType::getUnqual(MemTy);
650 bool IsHFA = isHomogeneousAggregate(Ty,
Base, NumMembers);
651 if (IsHFA && NumMembers > 1) {
656 assert(!IsIndirect &&
"Homogeneous aggregates should be passed directly");
657 auto BaseTyInfo = getContext().getTypeInfoInChars(
QualType(
Base, 0));
659 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
661 std::max(TyAlign, BaseTyInfo.Align));
666 BaseTyInfo.Width.getQuantity() < 16)
667 Offset = 16 - BaseTyInfo.Width.getQuantity();
669 for (
unsigned i = 0; i < NumMembers; ++i) {
686 CharUnits SlotSize = BaseAddr.getAlignment();
714 OnStackPtr = CGF.
Builder.CreateAdd(
715 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, Align - 1),
717 OnStackPtr = CGF.
Builder.CreateAnd(
718 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, -Align),
730 StackSize = StackSlotSize;
732 StackSize = TySize.
alignTo(StackSlotSize);
735 llvm::Value *NewStack = CGF.
Builder.CreateInBoundsGEP(
736 CGF.
Int8Ty, OnStackPtr, StackSizeC,
"new_stack");
742 TySize < StackSlotSize) {
743 CharUnits Offset = StackSlotSize - TySize;
757 OnStackBlock,
"vaargs.addr");
774 uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
784 auto TyInfo = getContext().getTypeInfoInChars(Ty);
788 bool IsIndirect =
false;
789 if (TyInfo.Width.getQuantity() > 16) {
792 IsIndirect = !isHomogeneousAggregate(Ty,
Base, Members);
796 TyInfo, SlotSize,
true);
801 bool IsIndirect =
false;
813std::unique_ptr<TargetCodeGenInfo>
816 return std::make_unique<AArch64TargetCodeGenInfo>(CGM.
getTypes(), Kind);
819std::unique_ptr<TargetCodeGenInfo>
822 return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.
getTypes(), K);
TypeInfoChars getTypeInfoInChars(const Type *T) const
Attr - This represents one attribute.
A fixed int type of a specified bitwidth.
This class is used for builtin types like 'int'.
CharUnits - This is an opaque type for sizes expressed in character units.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is a multiple of Align.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static ABIArgInfo getIgnore()
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
virtual bool allowBFloatArgsAndRet() const
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of the given type from the __builtin_ms_va_list pointed to by VAListAddr.
CodeGen::CGCXXABI & getCXXABI() const
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of the given type from the va_list pointed to by VAListAddr.
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
const TargetInfo & getTarget() const
virtual bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is actually the target word size.
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::ConstantInt * getSize(CharUnits N)
RecordArgABI
Specify how one should pass an argument of a record type.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
CGFunctionInfo - Class to encapsulate the information about a function definition.
ABIArgInfo & getReturnInfo()
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into an LLVM calling convention.
CanQualType getReturnType() const
MutableArrayRef< ArgInfo > arguments()
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr, otherwise inserts it at the current insertion point of the builder.
llvm::Type * ConvertTypeForMem(QualType T)
const TargetInfo & getTarget() const
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
ASTContext & getContext() const
llvm::Type * ConvertType(QualType T)
const CGFunctionInfo * CurFnInfo
This class organizes the cross-function state that is used while generating LLVM code.
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
Target specific hooks for defining how a type should be passed or returned from functions with one of the Swift calling conventions.
virtual bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy, unsigned NumElts) const
Returns true if the given vector type is legal from Swift's calling convention perspective.
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues, like target-specific attributes, builtins and so on.
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const
Retrieve the address of a function to call immediately before calling objc_retainAutoreleasedReturnValue.
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the given global.
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Decl - This represents one declaration (or definition), e.g. a variable, typedef, function, struct, etc.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Represents a function declaration or definition.
A (possibly-)qualified type.
virtual ParsedTargetAttr parseTargetAttr(StringRef Str) const
virtual bool hasBFloat16Type() const
Determine whether the _BFloat16 type is supported on this target.
virtual bool hasFeature(StringRef Feature) const
Determine whether the given target has the given feature.
virtual bool validateBranchProtection(StringRef Spec, StringRef Arch, BranchProtectionInfo &BPI, StringRef &Err) const
Determine if this TargetInfo supports the given branch protection specification.
The base class of the type hierarchy.
const T * castAs() const
Member-template castAs<specific type>.
bool isVectorType() const
const T * getAs() const
Member-template getAs<specific type>'.
Represents a GCC generic vector type.
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, const ABIArgInfo &AI)
Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted in an array of slots on the stack.
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
bool isAggregateTypeForABI(QualType T)
std::unique_ptr< TargetCodeGenInfo > createAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind Kind)
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind K)
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
bool Load(InterpState &S, CodePtr OpPC)
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * Int32Ty
llvm::PointerType * Int8PtrTy
Contains information gathered from parsing the contents of TargetAttr.
LangOptions::SignReturnAddressScopeKind SignReturnAddr
LangOptions::SignReturnAddressKeyKind SignKey
bool BranchTargetEnforcement