#include "TargetInfo.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
class AArch64ABIInfo : public ABIInfo {
  // ...
  ABIArgInfo classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                  bool IsNamedArg, unsigned CallingConvention,
                                  unsigned &NSRN, unsigned &NPRN) const;
  llvm::Type *convertFixedToScalableVectorType(const VectorType *VT) const;
  ABIArgInfo coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                 unsigned &NPRN) const;
  ABIArgInfo coerceAndExpandPureScalableAggregate(
      QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
      const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
      unsigned &NPRN) const;
  // ...
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;
  // ...
  bool isIllegalVectorType(QualType Ty) const;
  // ...
  bool passAsAggregateType(QualType Ty) const;
  bool passAsPureScalableType(QualType Ty, unsigned &NV, unsigned &NP,
                              SmallVectorImpl<llvm::Type *> &CoerceToSeq) const;

  void flattenType(llvm::Type *Ty,
                   SmallVectorImpl<llvm::Type *> &Flattened) const;
  void computeInfo(CGFunctionInfo &FI) const override {
    // ...
    unsigned ArgNo = 0;
    unsigned NSRN = 0, NPRN = 0;
    for (auto &it : FI.arguments()) {
      const bool IsNamedArg =
          ArgNo < FI.getRequiredArgs().getNumRequiredArgs();
      ++ArgNo;
      it.info = classifyArgumentType(it.type, FI.isVariadic(), IsNamedArg,
                                     FI.getCallingConvention(), NSRN, NPRN);
    }
  }
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64
               ? EmitMSVAArg(CGF, VAListAddr, Ty, Slot)
           : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF, Slot)
                           : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind, Slot);
  }
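  // Win64 and Darwin both use a simple pointer-based va_list, so their
  // handlers defer to the common void* emission path; everything else goes
  // through the full AAPCS64 register-save-area scheme in EmitAAPCSVAArg.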
  using ABIInfo::appendAttributeMangling;
  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
};
class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  // ...
  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
  // ...
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());

    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        StringRef Error;
        (void)CGM.getContext().getTargetInfo().validateBranchProtection(
            Attr.BranchProtection, Attr.CPU, BPI, Error);
        assert(Error.empty());
      }
    }
    auto *Fn = cast<llvm::Function>(GV);
    setBranchProtectionFnAttributes(BPI, *Fn);
  }
  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
  }
  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee, const CallArgList &Args,
                            QualType ReturnType) const override;
  // ...
};
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
llvm::Type *
AArch64ABIInfo::convertFixedToScalableVectorType(const VectorType *VT) const {
  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return llvm::ScalableVectorType::get(llvm::Type::getInt1Ty(getVMContext()),
                                         16);
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");

    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::MFloat8:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);

    case BuiltinType::Short:
    case BuiltinType::UShort:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);

    case BuiltinType::Int:
    case BuiltinType::UInt:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);

    case BuiltinType::Long:
    case BuiltinType::ULong:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);

    case BuiltinType::Half:
      return llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);

    case BuiltinType::Float:
      return llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);

    case BuiltinType::Double:
      return llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);

    case BuiltinType::BFloat16:
      return llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
    }
  }

  llvm_unreachable("expected fixed-length SVE vector");
}
ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                               unsigned &NPRN) const {
  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    NPRN = std::min(NPRN + 1, 4u);
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    NSRN = std::min(NSRN + 1, 8u);
    return ABIArgInfo::getDirect(convertFixedToScalableVectorType(VT));
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  // Android promotes <2 x i8> to i16, not i32.
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
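// Illustrative example: a 64-bit GNU vector such as <8 x i8> lands in the
// Size == 64 case above and is passed in one SIMD register, coerced to
// <2 x i32>; sizes that fit none of the cases are passed indirectly.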
ABIArgInfo AArch64ABIInfo::coerceAndExpandPureScalableAggregate(
    QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
    const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
    unsigned &NPRN) const {
  if (!IsNamedArg || NSRN + NVec > 8 || NPRN + NPred > 4)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  NSRN += NVec;
  NPRN += NPred;

  // ...
  llvm::Type *UnpaddedCoerceToType =
      UnpaddedCoerceToSeq.size() == 1
          ? UnpaddedCoerceToSeq[0]
          : llvm::StructType::get(CGT.getLLVMContext(), UnpaddedCoerceToSeq,
                                  /*isPacked=*/true);

  SmallVector<llvm::Type *> CoerceToSeq;
  flattenType(CGT.ConvertType(Ty), CoerceToSeq);
  auto *CoerceToType = llvm::StructType::get(CGT.getLLVMContext(), CoerceToSeq,
                                             /*isPacked=*/false);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
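// A pure scalable aggregate that fits (at most 8 data vectors and 4
// predicates remaining) is passed "coerce-and-expand": each scalable member
// gets its own Z or P register. Otherwise, as the early-out above shows, it
// degrades to an indirect pass like any large composite.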
ABIArgInfo
AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                     bool IsNamedArg,
                                     unsigned CallingConvention,
                                     unsigned &NSRN, unsigned &NPRN) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty, NSRN, NPRN);

  if (!passAsAggregateType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    if (Ty->isVectorType())
      NSRN = std::min(NSRN + 1, 8u);
    else if (const auto *BT = Ty->getAs<BuiltinType>()) {
      if (BT->isFloatingPoint())
        NSRN = std::min(NSRN + 1, 8u);
      else {
        switch (BT->getKind()) {
        case BuiltinType::SveBool:
        case BuiltinType::SveCount:
          NPRN = std::min(NPRN + 1, 4u);
          break;
        case BuiltinType::SveBoolx2:
          NPRN = std::min(NPRN + 2, 4u);
          break;
        case BuiltinType::SveBoolx4:
          NPRN = std::min(NPRN + 4, 4u);
          break;
        default:
          if (BT->isSVESizelessBuiltinType())
            NSRN = std::min(
                NSRN + getContext().getBuiltinVectorTypeInfo(BT).NumVectors,
                8u);
        }
      }
    }

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial copy
  // constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);

  // Empty records.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (!Ty->isSVESizelessBuiltinType() && (IsEmpty || Size == 0)) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // In GNU C mode the only ignored argument is an empty one with size 0.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadicFn;
  // In variadic functions on Windows, all composite types are treated alike,
  // with no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    NSRN = std::min(NSRN + Members, uint64_t(8));
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the argument alignment to 16, otherwise set it to 8
    // according to the AAPCS64.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, /*CanBeFlattened=*/true, Align);
  }

  // In AAPCS, named arguments of a pure scalable type are passed expanded in
  // registers, or indirectly if there are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(Ty, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          Ty, IsNamedArg, NVec, NPred, UnpaddedCoerceToSeq, NSRN, NPRN);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // A pair of i64 is used for a 16-byte aggregate with 8-byte alignment;
    // i128 is used for aggregates with 16-byte alignment.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
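// Worked example (illustrative): under AAPCS, "struct { float x, y; }" is an
// HFA with Base = float and Members = 2, so it is passed directly as
// [2 x float]; a 12-byte "struct { int a, b, c; }" is not an HFA, falls into
// the Size <= 128 case with Alignment = 64, and is passed as [2 x i64].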
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadicFn) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      unsigned NSRN = 0, NPRN = 0;
      return coerceIllegalVector(RetTy, NSRN, NPRN);
    }
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!passAsAggregateType(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (!RetTy->isSVESizelessBuiltinType() &&
      (isEmptyRecord(getContext(), RetTy, true) || Size == 0))
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadicFn))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // In AAPCS, return values of a pure scalable type are treated as a single
  // named argument and passed expanded in registers, or indirectly if there
  // are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NSRN = 0, NPRN = 0;
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(RetTy, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          RetTy, /*IsNamedArg=*/true, NVec, NPred, UnpaddedCoerceToSeq, NSRN,
          NPRN);
  }

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On little-endian targets, composite types are returned in the lower
    // bits of a 64-bit register, so an integer of the exact width suffices.
    if (Size <= 64 && getDataLayout().isLittleEndian())
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to a multiple of 8 bytes

    // A pair of i64 is used for a 16-byte aggregate with 8-byte alignment.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}
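// Illustrative: "struct { int a, b; }" (64 bits) is returned directly as i64
// on a little-endian target, while any non-HFA aggregate over 16 bytes is
// returned indirectly through an sret pointer.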
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Fixed-length SVE vectors count as "illegal" here: they must be coerced
    // to scalable vector types for argument passing and return.
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return true;

    // The number of elements must be a power of 2.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows
    // huge vectors.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // ...
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (auto Kind = VT->getVectorKind();
        Kind == VectorKind::SveFixedLengthData ||
        Kind == VectorKind::SveFixedLengthPredicate)
      return false;

    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}
bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  return true;
}
bool AArch64ABIInfo::passAsAggregateType(QualType Ty) const {
  if (Kind == AArch64ABIKind::AAPCS && Ty->isSVESizelessBuiltinType()) {
    const auto *BT = Ty->castAs<BuiltinType>();
    return BT->getKind() != BuiltinType::SveCount &&
           getContext().getBuiltinVectorTypeInfo(BT).NumVectors > 1;
  }
  return isAggregateTypeForABI(Ty);
}
bool AArch64ABIInfo::passAsPureScalableType(
    QualType Ty, unsigned &NVec, unsigned &NPred,
    SmallVectorImpl<llvm::Type *> &CoerceToSeq) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElt = AT->getZExtSize();
    if (NElt == 0)
      return false;

    unsigned NV = 0, NP = 0;
    SmallVector<llvm::Type *> EltCoerceToSeq;
    if (!passAsPureScalableType(AT->getElementType(), NV, NP, EltCoerceToSeq))
      return false;

    if (CoerceToSeq.size() + NElt * EltCoerceToSeq.size() > 12)
      return false;

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::copy(EltCoerceToSeq, std::back_inserter(CoerceToSeq));

    NVec += NElt * NV;
    NPred += NElt * NP;
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // ...
    const RecordDecl *RD = RT->getDecl();
    if (RD->isUnion())
      return false;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;
        if (!passAsPureScalableType(I.getType(), NVec, NPred, CoerceToSeq))
          return false;
      }
    }

    // Check members.
    for (const auto *FD : RD->fields()) {
      QualType FT = FD->getType();
      if (isEmptyField(getContext(), FD, /*AllowArrays=*/true))
        continue;
      if (!passAsPureScalableType(FT, NVec, NPred, CoerceToSeq))
        return false;
    }

    return true;
  }

  if (const auto *VT = Ty->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      ++NPred;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
      ++NVec;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    return false;
  }

  if (!Ty->isBuiltinType())
    return false;

  bool isPredicate;
  switch (Ty->castAs<BuiltinType>()->getKind()) {
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    isPredicate = false;                                                       \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:                                                        \
    isPredicate = true;                                                        \
    break;
#define SVE_TYPE(Name, Id, SingletonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
  default:
    return false;
  }

  ASTContext::BuiltinVectorTypeInfo Info =
      getContext().getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
  assert(Info.NumVectors > 0 && Info.NumVectors <= 4 &&
         "Expected 1, 2, 3 or 4 vectors!");
  if (isPredicate)
    NPred += Info.NumVectors;
  else
    NVec += Info.NumVectors;
  llvm::Type *EltTy = Info.ElementType->isMFloat8Type()
                          ? llvm::Type::getInt8Ty(getVMContext())
                          : CGT.ConvertType(Info.ElementType);
  auto *VTy = llvm::ScalableVectorType::get(EltTy, Info.EC.getKnownMinValue());

  if (CoerceToSeq.size() + Info.NumVectors > 12)
    return false;
  std::fill_n(std::back_inserter(CoerceToSeq), Info.NumVectors, VTy);

  return true;
}
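// Illustrative: "struct S { svfloat32_t v; svbool_t p; }" is a pure scalable
// type with NVec = 1, NPred = 1 and CoerceToSeq = { <vscale x 4 x float>,
// <vscale x 16 x i1> }.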
// Expand an LLVM IR type into a sequence with one element for each
// non-struct, non-array member, keeping padding types as-is.
void AArch64ABIInfo::flattenType(
    llvm::Type *Ty, SmallVectorImpl<llvm::Type *> &Flattened) const {
  if (ABIArgInfo::isPaddingForCoerceAndExpand(Ty)) {
    Flattened.push_back(Ty);
    return;
  }

  if (const auto *AT = dyn_cast<llvm::ArrayType>(Ty)) {
    uint64_t NElt = AT->getNumElements();
    if (NElt == 0)
      return;

    SmallVector<llvm::Type *> EltFlattened;
    flattenType(AT->getElementType(), EltFlattened);

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::copy(EltFlattened, std::back_inserter(Flattened));
    return;
  }

  if (const auto *ST = dyn_cast<llvm::StructType>(Ty)) {
    for (auto *ET : ST->elements())
      flattenType(ET, Flattened);
    return;
  }

  Flattened.push_back(Ty);
}
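// Illustrative: flattening { [2 x i64], double } yields the sequence
// i64, i64, double; padding types are kept verbatim so coerce-and-expand can
// recognize and skip them.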
RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF, AArch64ABIKind Kind,
                                      AggValueSlot Slot) const {
  // These register counters are not used for variadic arguments, so they do
  // not need to retain their values across calls to classifyArgumentType.
  unsigned NSRN = 0, NPRN = 0;
  ABIArgInfo AI =
      classifyArgumentType(Ty, /*IsVariadicFn=*/true, /*IsNamedArg=*/false,
                           CGF.CurFnInfo->getCallingConvention(), NSRN, NPRN);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR =
      !isSoftFloat() && (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());
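  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  //   struct {
  //     void *__stack;
  //     void *__gr_top;
  //     void *__vr_top;
  //     int __gr_offs;
  //     int __vr_offs;
  //   };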
  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // General-purpose registers: load __gr_offs; slots are 8 bytes.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number of __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // FP/SIMD registers: load __vr_offs; each register slot is 16 bytes.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number of __vr_top
    RegSize = 16 * NumRegs;
  }
  // If reg_offs >= 0 we're already using the stack for this type of argument.
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers; the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);
  // Integer arguments may need to be realigned (for example, a
  // "struct { __int128 a; }" gets passed in an even/odd register pair), so
  // round __gr_offs up to the type's alignment before computing the address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }
  // Update reg_offs for the next call to va_arg on this va_list. This is done
  // unconditionally, because allocating an argument to the stack also uses up
  // all the remaining registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
  // The argument was in registers.
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
                   CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;

  if (IsIndirect) {
    // An indirectly-passed value appears in the register save area as a
    // pointer to the actual value.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }
  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers have their elements stored
    // 16 bytes apart regardless of size (notionally in qN, qN+1, ...), so
    // reload them into a contiguous temporary.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp =
        CGF.CreateTempAlloca(HFATy, std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, each element is right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      // ... copy member i from its 16-byte-spaced register slot into Tmp ...
    }

    RegAddr = Tmp.withElementType(MemTy);
  } else {
    // Otherwise the object is contiguous in memory, though it might be
    // right-aligned within its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) && TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = BaseAddr.withElementType(MemTy);
  }
  CGF.EmitBranch(ContBlock);

  // The argument was on the stack.
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
  // ... realign OnStackPtr here if the type is over-aligned ...
  Address OnStackAddr(OnStackPtr, CGF.Int8Ty,
                      std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);
  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = OnStackAddr.withElementType(MemTy);

  CGF.EmitBranch(ContBlock);

  // Tidy up: merge the register and stack paths.
  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");
  // ...
}
RValue AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF,
                                       AggValueSlot Slot) const {
  // ...
  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
  // ...
  // The size of the actual thing passed, which might end up just being a
  // pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous aggregates are
  // passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign=*/true, Slot);
}
RValue AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  bool IsIndirect = false;
  // ...
}
static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags,
                                 const StringRef ABIName,
                                 const AArch64ABIInfo &ABIInfo,
                                 const QualType &Ty, const NamedDecl *D,
                                 SourceLocation loc) {
  const Type *HABase = nullptr;
  uint64_t HAMembers = 0;
  if (Ty->isFloatingType() || Ty->isVectorType() ||
      ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
    Diags.Report(loc, diag::err_target_unsupported_type_for_abi)
        << D->getDeclName() << Ty << ABIName;
  }
}
void AArch64TargetCodeGenInfo::checkFunctionABI(
    CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!TI.hasFeature("fp") && !ABIInfo.isSoftFloat()) {
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo,
                         FuncDecl->getReturnType(), FuncDecl,
                         FuncDecl->getLocation());
    for (ParmVarDecl *PVD : FuncDecl->parameters())
      diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, PVD->getType(),
                           PVD, FuncDecl->getLocation());
  }
}
// Bitmask of Arm SME ABI issues that would arise from inlining.
enum class ArmSMEInlinability : uint8_t {
  Ok = 0,
  ErrorCalleeRequiresNewZA = 1 << 0,
  ErrorCalleeRequiresNewZT0 = 1 << 1,
  WarnIncompatibleStreamingModes = 1 << 2,
  ErrorIncompatibleStreamingModes = 1 << 3,

  IncompatibleStreamingModes =
      WarnIncompatibleStreamingModes | ErrorIncompatibleStreamingModes,

  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ErrorIncompatibleStreamingModes),
};

// Determines if there are any Arm SME ABI issues with inlining Callee into
// Caller.
static ArmSMEInlinability GetArmSMEInlinability(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee) {
  bool CallerIsStreaming =
      IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
  bool CalleeIsStreaming =
      IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
  bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
  bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);

  ArmSMEInlinability Inlinability = ArmSMEInlinability::Ok;

  if (!CalleeIsStreamingCompatible &&
      (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible)) {
    if (CalleeIsStreaming)
      Inlinability |= ArmSMEInlinability::ErrorIncompatibleStreamingModes;
    else
      Inlinability |= ArmSMEInlinability::WarnIncompatibleStreamingModes;
  }
  if (auto *NewAttr = Callee->getAttr<ArmNewAttr>()) {
    if (NewAttr->isNewZA())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZA;
    if (NewAttr->isNewZT0())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZT0;
  }

  return Inlinability;
}
void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee) const {
  if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
    return;

  ArmSMEInlinability Inlinability = GetArmSMEInlinability(Caller, Callee);

  if ((Inlinability & ArmSMEInlinability::IncompatibleStreamingModes) !=
      ArmSMEInlinability::Ok)
    CGM.getDiags().Report(
        CallLoc,
        (Inlinability & ArmSMEInlinability::ErrorIncompatibleStreamingModes) ==
                ArmSMEInlinability::ErrorIncompatibleStreamingModes
            ? diag::err_function_always_inline_attribute_mismatch
            : diag::warn_function_always_inline_attribute_mismatch)
        << Caller->getDeclName() << Callee->getDeclName() << "streaming";

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZA) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZA)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
        << Callee->getDeclName();

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZT0) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZT0)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_zt0)
        << Callee->getDeclName();
}
void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!Caller || TI.hasFeature("fp") || ABIInfo.isSoftFloat())
    return;

  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
                       Callee ? Callee : Caller, CallLoc);

  for (const CallArg &Arg : Args)
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
                         Callee ? Callee : Caller, CallLoc);
}
void AArch64TargetCodeGenInfo::checkFunctionCallABI(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
  checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args, ReturnType);
}
bool AArch64TargetCodeGenInfo::wouldInliningViolateFunctionCallABI(
    const FunctionDecl *Caller, const FunctionDecl *Callee) const {
  return Caller && Callee &&
         GetArmSMEInlinability(Caller, Callee) != ArmSMEInlinability::Ok;
}
void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                             unsigned Index,
                                             raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}
void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
                                             raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << "._";
  SmallVector<StringRef, 8> Features;
  AttrStr.split(Features, "+");
  for (auto &Feat : Features)
    Feat = Feat.trim();

  llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
    return LHS.compare(RHS) < 0;
  });

  llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
  for (auto &Feat : Features)
    if (auto Ext = llvm::AArch64::parseFMVExtension(Feat))
      if (UniqueFeats.insert(Ext->Name).second)
        Out << 'M' << Ext->Name;
}
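// Illustrative: a target_clones feature string "sve2+bf16" is trimmed,
// sorted, and deduplicated, then emitted as 'M' plus each canonical FMV
// extension name, yielding the mangling suffix "._Mbf16Msve2".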
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
}