#include "TargetInfo.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
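
// ABI classification for AArch64. One class covers the standard AAPCS64 ABI,
// the soft-float AAPCS variant, Apple's DarwinPCS, and the Win64 rules; the
// active variant is selected by the AArch64ABIKind passed to the constructor.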
class AArch64ABIInfo : public ABIInfo {
  AArch64ABIKind Kind;
  std::unique_ptr<TargetCodeGenInfo> WinX86_64CodegenInfo;

public:
  AArch64ABIInfo(CodeGenModule &CGM, AArch64ABIKind Kind)
      : ABIInfo(CGM.getTypes()), Kind(Kind) {
    if (getTarget().getTriple().isWindowsArm64EC()) {
      // On Arm64EC, variadic arguments follow the Windows x64 rules.
      WinX86_64CodegenInfo =
          createWinX86_64TargetCodeGenInfo(CGM, X86AVXABILevel::None);
    }
  }

  bool isSoftFloat() const { return Kind == AArch64ABIKind::AAPCSSoft; }

  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }
  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadicFn) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                  bool IsNamedArg, unsigned CallingConvention,
                                  unsigned &NSRN, unsigned &NPRN) const;
  llvm::Type *convertFixedToScalableVectorType(const VectorType *VT) const;
  ABIArgInfo coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                 unsigned &NPRN) const;
  ABIArgInfo coerceAndExpandPureScalableAggregate(
      QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
      const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
      unsigned &NPRN) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isIllegalVectorType(QualType Ty) const;

  bool passAsAggregateType(QualType Ty) const;
  bool passAsPureScalableType(QualType Ty, unsigned &NV, unsigned &NP,
                              SmallVectorImpl<llvm::Type *> &CoerceToSeq) const;

  void flattenType(llvm::Type *Ty,
                   SmallVectorImpl<llvm::Type *> &Flattened) const;
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    unsigned ArgNo = 0;
    unsigned NSRN = 0, NPRN = 0;
    for (auto &it : FI.arguments()) {
      const bool IsNamedArg =
          ArgNo < FI.getRequiredArgs().getNumRequiredArgs();
      ++ArgNo;
      it.info = classifyArgumentType(it.type, FI.isVariadic(), IsNamedArg,
                                     FI.getCallingConvention(), NSRN, NPRN);
    }
  }
  RValue EmitDarwinVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                         AggValueSlot Slot) const;

  RValue EmitAAPCSVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                        AArch64ABIKind Kind, AggValueSlot Slot) const;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64
               ? EmitMSVAArg(CGF, VAListAddr, Ty, Slot)
           : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF, Slot)
                           : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind, Slot);
  }
  RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                     AggValueSlot Slot) const override;
  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }
  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
};
class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGM, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGM.getTypes());
  }
  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31; // SP is register 31 in the AArch64 DWARF numbering.
  }
  bool doesReturnSlotInterfereWithArgs() const override { return false; }
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    auto *Fn = dyn_cast<llvm::Function>(GV);
    if (!Fn)
      return;

    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());

    if (FD && FD->hasAttr<TargetAttr>()) {
      const auto *TA = FD->getAttr<TargetAttr>();
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        StringRef Error;
        (void)CGM.getTarget().validateBranchProtection(
            Attr.BranchProtection, Attr.CPU, BPI, CGM.getLangOpts(), Error);
        assert(Error.empty());
      }
    }
    setBranchProtectionFnAttributes(BPI, *Fn);
  }
  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    auto *ST = dyn_cast<llvm::StructType>(Ty);
    if (ST && ST->getNumElements() == 1) {
      auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
      if (AT && AT->getNumElements() == 8 &&
          AT->getElementType()->isIntegerTy(64))
        return true;
    }
    return false;
  }
  void checkFunctionABI(CodeGenModule &CGM,
                        const FunctionDecl *Decl) const override;

  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee, const CallArgList &Args,
                            QualType ReturnType) const override;

  bool wouldInliningViolateFunctionCallABI(
      const FunctionDecl *Caller, const FunctionDecl *Callee) const override;

private:
  // Diagnose streaming-mode and ZA/ZT0 state mismatches when an always_inline
  // callee cannot legally be inlined into its caller.
  void checkFunctionCallABIStreaming(CodeGenModule &CGM, SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee) const;
  // Diagnose FP arguments or return values on targets without FP registers
  // unless the soft-float ABI variant is in use.
  void checkFunctionCallABISoftFloat(CodeGenModule &CGM, SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee,
                                     const CallArgList &Args,
                                     QualType ReturnType) const;
};
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGM, K) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
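
// Map a fixed-length SVE vector or predicate type (one declared with
// __attribute__((arm_sve_vector_bits))) onto the scalable LLVM vector type
// that the AAPCS uses to pass it in registers.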
llvm::Type *
AArch64ABIInfo::convertFixedToScalableVectorType(const VectorType *VT) const {
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return llvm::ScalableVectorType::get(llvm::Type::getInt1Ty(getVMContext()),
                                         16);
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");

    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::MFloat8:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);

    case BuiltinType::Short:
    case BuiltinType::UShort:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);

    case BuiltinType::Int:
    case BuiltinType::UInt:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);

    case BuiltinType::Long:
    case BuiltinType::ULong:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);

    case BuiltinType::Half:
      return llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);

    case BuiltinType::Float:
      return llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);

    case BuiltinType::Double:
      return llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);

    case BuiltinType::BFloat16:
      return llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
    }
  }

  llvm_unreachable("expected fixed-length SVE vector");
}
ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                               unsigned &NPRN) const {
  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    NPRN = std::min(NPRN + 1, 4u);
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    NSRN = std::min(NSRN + 1, 8u);
    return ABIArgInfo::getDirect(convertFixedToScalableVectorType(VT));
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  // Android and OHOS promote small vectors to i16 rather than i32.
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }

  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                 /*ByVal=*/false);
}
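
// Pass a Pure Scalable Type aggregate expanded into SVE vector and predicate
// registers, provided it is a named argument and enough Z (8 total) and
// P (4 total) registers remain; otherwise fall back to passing it indirectly.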
ABIArgInfo AArch64ABIInfo::coerceAndExpandPureScalableAggregate(
    QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
    const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
    unsigned &NPRN) const {
  if (!IsNamedArg || NSRN + NVec > 8 || NPRN + NPred > 4)
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/false);
  NSRN += NVec;
  NPRN += NPred;

  llvm::Type *UnpaddedCoerceToType =
      UnpaddedCoerceToSeq.size() == 1
          ? UnpaddedCoerceToSeq[0]
          : llvm::StructType::get(CGT.getLLVMContext(), UnpaddedCoerceToSeq,
                                  /*isPacked=*/true);

  SmallVector<llvm::Type *> CoerceToSeq;
  flattenType(CGT.ConvertType(Ty), CoerceToSeq);
  auto *CoerceToType = llvm::StructType::get(CGT.getLLVMContext(), CoerceToSeq,
                                             /*isPacked=*/false);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
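
// Classify one argument for the selected AArch64 ABI variant, updating NSRN
// (SIMD&FP registers) and NPRN (predicate registers) along the way.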
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                                bool IsNamedArg,
                                                unsigned CallingConvention,
                                                unsigned &NSRN,
                                                unsigned &NPRN) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // On Arm64EC, variadic arguments follow the Windows x64 calling convention.
  if (IsVariadicFn && getTarget().getTriple().isWindowsArm64EC()) {
    return WinX86_64CodegenInfo->getABIInfo().classifyArgForArm64ECVarArg(Ty);
  }

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty, NSRN, NPRN);

  if (!passAsAggregateType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const auto *ED = Ty->getAsEnumDecl())
      Ty = ED->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                       false);

    if (Ty->isVectorType())
      NSRN = std::min(NSRN + 1, 8u);
    else if (const auto *BT = Ty->getAs<BuiltinType>()) {
      if (BT->isFloatingPoint())
        NSRN = std::min(NSRN + 1, 8u);
      else {
        switch (BT->getKind()) {
        case BuiltinType::SveBool:
        case BuiltinType::SveCount:
          NPRN = std::min(NPRN + 1, 4u);
          break;
        case BuiltinType::SveBoolx2:
          NPRN = std::min(NPRN + 2, 4u);
          break;
        case BuiltinType::SveBoolx4:
          NPRN = std::min(NPRN + 4, 4u);
          break;
        default:
          if (BT->isSVESizelessBuiltinType())
            NSRN = std::min(
                NSRN + getContext().getBuiltinVectorTypeInfo(BT).NumVectors,
                8u);
        }
      }
    }

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial copy
  // constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(
        Ty, getDataLayout().getAllocaAddrSpace(),
        /*ByVal=*/RAA == CGCXXABI::RAA_DirectInMemory);
  }

  uint64_t Size = getContext().getTypeSize(Ty);

  // Empty records:
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (!Ty->isSVESizelessBuiltinType() && (IsEmpty || Size == 0)) {
    // Empty records are ignored in C mode, and in C++ on Darwin.
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // Otherwise an empty (but non-zero-sized) C++ struct is passed as a byte.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadicFn;
  // In variadic functions on Windows, all composite types are treated alike;
  // there is no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    NSRN = std::min(NSRN + Members, uint64_t(8));
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the argument alignment to 16, otherwise set it to 8
    // according to the AAPCS64 document.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // In AAPCS named arguments of a Pure Scalable Type are passed expanded in
  // registers, or indirectly if there are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(Ty, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          Ty, IsNamedArg, NVec, NPred, UnpaddedCoerceToSeq, NSRN, NPRN);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // If the aggregate consists only of 64-bit (default address space)
    // pointers, pass it as an array of pointers so they stay visible to alias
    // analysis instead of being round-tripped through integers.
    auto ContainsOnlyPointers = [&](const auto &Self, QualType Ty) {
      const RecordDecl *RD = Ty->getAsRecordDecl();
      if (!RD || isEmptyRecord(getContext(), Ty, true))
        return false;
      if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        for (const auto &I : CXXRD->bases())
          if (!Self(Self, I.getType()))
            return false;
      }
      return all_of(RD->fields(), [&](FieldDecl *FD) {
        QualType FDTy = FD->getType();
        if (FDTy->isArrayType())
          FDTy = getContext().getBaseElementType(FDTy);
        return (FDTy->isPointerOrReferenceType() &&
                getContext().getTypeSize(FDTy) == 64 &&
                !FDTy->getPointeeType().hasAddressSpace()) ||
               Self(Self, FDTy);
      });
    };

    // We use a pair of i64 for 16-byte aggregates with 8-byte alignment, and
    // i128 for aggregates with 16-byte alignment.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    if ((Size == 64 || Size == 128) && Alignment == 64 &&
        ContainsOnlyPointers(ContainsOnlyPointers, Ty))
      BaseTy = llvm::PointerType::getUnqual(getVMContext());
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                 /*ByVal=*/false);
}
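
// Classify the return type: small aggregates, HFAs, and pure scalable types
// are returned in registers; larger values are returned indirectly.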
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadicFn) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      unsigned NSRN = 0, NPRN = 0;
      return coerceIllegalVector(RetTy, NSRN, NPRN);
    }
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy, getDataLayout().getAllocaAddrSpace());

  if (!passAsAggregateType(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const auto *ED = RetTy->getAsEnumDecl())
      RetTy = ED->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy,
                                       getDataLayout().getAllocaAddrSpace());

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (!RetTy->isSVESizelessBuiltinType() &&
      (isEmptyRecord(getContext(), RetTy, true) || Size == 0))
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadicFn))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // In AAPCS return values of a Pure Scalable Type are treated as a single
  // named argument and passed expanded in registers, or indirectly if there
  // are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NSRN = 0, NPRN = 0;
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(RetTy, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          RetTy, /* IsNamedArg */ true, NVec, NPred, UnpaddedCoerceToSeq, NSRN,
          NPRN);
  }

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in the lower bits of a 64-bit register
      // for LE, and in the higher bits for BE. Use an integer of the exact
      // size to avoid picking up unwanted bits.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to a multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregates with 8-byte alignment, and
    // i128 for aggregates with 16-byte alignment.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy, getDataLayout().getAllocaAddrSpace());
}
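
// A vector is "illegal" for direct passing if it is a fixed-length SVE vector,
// has a non-power-of-two element count, or does not fit exactly in 64 or 128
// bits (with a carve-out for Mach-O arm64_32).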
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Fixed-length SVE vectors are represented as scalable vectors in
    // arguments and return values, so they must be coerced.
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return true;

    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows huge
    // vectors.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // For the soft-float ABI variant, no types are considered to be homogeneous
  // aggregates.
  if (isSoftFloat())
    return false;

  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. Unlike the 32-bit ABI, any
  // floating-point type is allowed, including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (auto Kind = VT->getVectorKind();
        Kind == VectorKind::SveFixedLengthData ||
        Kind == VectorKind::SveFixedLengthPredicate)
      return false;

    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}
bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  // AAPCS64 applies the homogeneous-aggregate rule to the result of the data
  // layout decision, so anything that does not affect layout (such as a
  // zero-length bitfield) does not stop a struct from being an HFA.
  return true;
}
bool AArch64ABIInfo::passAsAggregateType(QualType Ty) const {
  if (Kind == AArch64ABIKind::AAPCS && Ty->isSVESizelessBuiltinType()) {
    const auto *BT = Ty->castAs<BuiltinType>();
    return !BT->isSVECount() &&
           getContext().getBuiltinVectorTypeInfo(BT).NumVectors > 1;
  }
  return isAggregateTypeForABI(Ty);
}
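
// Check whether Ty is a Pure Scalable Type: built up (through structs, arrays,
// and bases) exclusively from SVE vectors and predicates, with no more than
// twelve constituent registers in total. On success, NVec/NPred hold the
// vector and predicate counts and CoerceToSeq the flattened sequence of
// scalable LLVM types used to pass the value.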
bool AArch64ABIInfo::passAsPureScalableType(
    QualType Ty, unsigned &NVec, unsigned &NPred,
    SmallVectorImpl<llvm::Type *> &CoerceToSeq) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElt = AT->getZExtSize();
    if (NElt == 0)
      return false;

    unsigned NV = 0, NP = 0;
    SmallVector<llvm::Type *> EltCoerceToSeq;
    if (!passAsPureScalableType(AT->getElementType(), NV, NP, EltCoerceToSeq))
      return false;

    if (CoerceToSeq.size() + NElt * EltCoerceToSeq.size() > 12)
      return false;

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::append_range(CoerceToSeq, EltCoerceToSeq);

    NVec += NElt * NV;
    NPred += NElt * NP;
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // If the record cannot be passed in registers, then it's not a PST.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
        RAA != CGCXXABI::RAA_Default)
      return false;

    // Pure scalable types are never unions and never contain unions.
    const RecordDecl *RD = RT->getDecl()->getDefinitionOrSelf();
    if (RD->isUnion())
      return false;

    // If this is a C++ record, check the bases.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;
        if (!passAsPureScalableType(I.getType(), NVec, NPred, CoerceToSeq))
          return false;
      }
    }

    // Check members.
    for (const auto *FD : RD->fields()) {
      QualType FT = FD->getType();
      if (isEmptyField(getContext(), FD, /*AllowArrays=*/true))
        continue;
      if (!passAsPureScalableType(FT, NVec, NPred, CoerceToSeq))
        return false;
    }

    return true;
  }

  if (const auto *VT = Ty->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      ++NPred;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
      ++NVec;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    return false;
  }

  if (!Ty->isBuiltinType())
    return false;

  bool isPredicate;
  switch (Ty->castAs<BuiltinType>()->getKind()) {
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    isPredicate = false;                                                       \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:                                                        \
    isPredicate = true;                                                        \
    break;
#include "clang/Basic/AArch64ACLETypes.def"
  default:
    return false;
  }

  ASTContext::BuiltinVectorTypeInfo Info =
      getContext().getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
  assert(Info.NumVectors > 0 && Info.NumVectors <= 4 &&
         "Expected 1, 2, 3 or 4 vectors!");
  if (isPredicate)
    NPred += Info.NumVectors;
  else
    NVec += Info.NumVectors;
  llvm::Type *EltTy = Info.ElementType->isMFloat8Type()
                          ? llvm::Type::getInt8Ty(getVMContext())
                          : CGT.ConvertType(Info.ElementType);
  auto *VTy = llvm::ScalableVectorType::get(EltTy, Info.EC.getKnownMinValue());

  if (CoerceToSeq.size() + Info.NumVectors > 12)
    return false;
  std::fill_n(std::back_inserter(CoerceToSeq), Info.NumVectors, VTy);

  return true;
}
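
// Recursively expand an LLVM type into its leaf element types (flattening
// arrays and structs) so the result can seed the coerce-and-expand sequence
// for pure scalable aggregates.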
void AArch64ABIInfo::flattenType(
    llvm::Type *Ty, SmallVectorImpl<llvm::Type *> &Flattened) const {
  if (ABIArgInfo::isPaddingForCoerceAndExpand(Ty)) {
    Flattened.push_back(Ty);
    return;
  }

  if (const auto *AT = dyn_cast<llvm::ArrayType>(Ty)) {
    uint64_t NElt = AT->getNumElements();
    if (NElt == 0)
      return;

    SmallVector<llvm::Type *> EltFlattened;
    flattenType(AT->getElementType(), EltFlattened);

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::append_range(Flattened, EltFlattened);
    return;
  }

  if (const auto *ST = dyn_cast<llvm::StructType>(Ty)) {
    for (auto *ET : ST->elements())
      flattenType(ET, Flattened);
    return;
  }

  Flattened.push_back(Ty);
}
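
// Lower va_arg for the full AAPCS64 va_list, which carries separate general
// and FP/SIMD register save areas plus a stack area: test the saved register
// offset at run time, load from the register save area if the argument was
// passed in registers, otherwise advance __stack, and merge the two addresses
// with a phi.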
RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF,
                                      AArch64ABIKind Kind,
                                      AggValueSlot Slot) const {
  // These numbers are not used for variadic arguments, hence it doesn't matter
  // that they don't retain their values across multiple calls.
  unsigned NSRN = 0, NPRN = 0;
  ABIArgInfo AI =
      classifyArgumentType(Ty, /*IsVariadicFn=*/true, /* IsNamedArg */ false,
                           CallingConv::CC_C, NSRN, NPRN);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy->getContext());
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR =
      !isSoftFloat() && (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());

  // The AAPCS variadic routine first checks the relevant register offset in
  // the va_list; a negative value means the argument was passed in registers.
  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs, 1 of __gr_top.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1;
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs, 2 of __vr_top.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2;
    RegSize = 16 * NumRegs;
  }

  // A non-negative offset means the register save area is exhausted and the
  // argument lives on the stack.
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, reserve the registers now and check whether the updated offset
  // still falls within the register save area.
  CGF.EmitBlock(MaybeRegBlock);

  // Align __gr_offs for over-aligned (16-byte) general-register arguments.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the offset for the next va_arg and test whether this argument
  // still fits in the register save area.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  // The argument was in registers.
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
                   CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find
    // from stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy->getContext());
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers have their elements stored
    // 16 bytes apart (one per qN register), so copy them into a contiguous
    // temporary.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = LoadAddr.withElementType(BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = Tmp.withElementType(MemTy);
  } else {
    // Otherwise the object is contiguous in memory, but may be right-aligned
    // in its slot on big-endian targets.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = BaseAddr.withElementType(MemTy);
  }

  CGF.EmitBranch(ContBlock);

  // The argument was on the stack.
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Stack arguments may also need realignment; both integer and FP arguments
  // can be affected here.
  if (!IsIndirect && TyAlign.getQuantity() > 8)
    OnStackPtr = emitRoundPointerUpToAlignment(CGF, OnStackPtr, TyAlign);

  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = OnStackAddr.withElementType(MemTy);

  CGF.EmitBranch(ContBlock);

  // Tidy up: merge the register and stack addresses.
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return CGF.EmitLoadOfAnyValue(
        CGF.MakeAddrLValue(
            Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
                    TyAlign),
            Ty),
        Slot);

  return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(ResAddr, Ty), Slot);
}
RValue AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF,
                                       AggValueSlot Slot) const {
  // The Darwin va_list has been lowered to a single pointer.
  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Slot.asRValue();

  // The size of the actual thing passed, which might end up just being a
  // pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
  // be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign*/ true, Slot);
}
RValue AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  bool IsIndirect = false;

  if (getTarget().getTriple().isWindowsArm64EC()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    uint64_t Width = getContext().getTypeSize(Ty);
    IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false, Slot);
}
static bool isStreamingCompatible(const FunctionDecl *F) {
  if (const auto *T = F->getType()->getAs<FunctionProtoType>())
    return T->getAArch64SMEAttributes() &
           FunctionType::SME_PStateSMCompatibleMask;
  return false;
}

// Report an error if an argument or return value of type Ty would need to be
// passed in a floating-point register.
static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags,
                                 const StringRef ABIName,
                                 const AArch64ABIInfo &ABIInfo,
                                 const QualType &Ty, const NamedDecl *D,
                                 SourceLocation loc) {
  const Type *HABase = nullptr;
  uint64_t HAMembers = 0;
  if (Ty->isFloatingType() || Ty->isVectorType() ||
      ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
    Diags.Report(loc, diag::err_target_unsupported_type_for_abi)
        << D->getDeclName() << Ty << ABIName;
  }
}
void AArch64TargetCodeGenInfo::checkFunctionABI(
    CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  // If we are using a hard-float ABI, but do not have floating point
  // registers, report an error for any function arguments or returns which
  // would be passed in floating-point registers.
  if (!TI.hasFeature("fp") && !ABIInfo.isSoftFloat()) {
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo,
                         FuncDecl->getReturnType(), FuncDecl,
                         FuncDecl->getLocation());
    for (ParmVarDecl *PVD : FuncDecl->parameters()) {
      diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, PVD->getType(),
                           PVD, FuncDecl->getLocation());
    }
  }
}
/// Determines if there are any Arm SME ABI issues with inlining \p Callee into
/// \p Caller.
static ArmSMEInlinability GetArmSMEInlinability(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee) {
  bool CallerIsStreaming =
      IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
  bool CalleeIsStreaming =
      IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
  bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
  bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);

  ArmSMEInlinability Inlinability = ArmSMEInlinability::Ok;

  if (!CalleeIsStreamingCompatible &&
      (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible)) {
    if (CalleeIsStreaming)
      Inlinability |= ArmSMEInlinability::ErrorIncompatibleStreamingModes;
    else
      Inlinability |= ArmSMEInlinability::WarnIncompatibleStreamingModes;
  }
  if (auto *NewAttr = Callee->getAttr<ArmNewAttr>()) {
    if (NewAttr->isNewZA())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZA;
    if (NewAttr->isNewZT0())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZT0;
  }

  return Inlinability;
}
void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee) const {
  if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
    return;

  ArmSMEInlinability Inlinability = GetArmSMEInlinability(Caller, Callee);

  if ((Inlinability & ArmSMEInlinability::IncompatibleStreamingModes) !=
      ArmSMEInlinability::Ok)
    CGM.getDiags().Report(
        CallLoc,
        (Inlinability & ArmSMEInlinability::ErrorIncompatibleStreamingModes) ==
                ArmSMEInlinability::ErrorIncompatibleStreamingModes
            ? diag::err_function_always_inline_attribute_mismatch
            : diag::warn_function_always_inline_attribute_mismatch)
        << Caller->getDeclName() << Callee->getDeclName() << "streaming";

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZA) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZA)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
        << Callee->getDeclName();

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZT0) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZT0)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_zt0)
        << Callee->getDeclName();
}
void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!Caller || TI.hasFeature("fp") || ABIInfo.isSoftFloat())
    return;

  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
                       Callee ? Callee : Caller, CallLoc);

  for (const CallArg &Arg : Args)
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
                         Callee ? Callee : Caller, CallLoc);
}
void AArch64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
                                                    SourceLocation CallLoc,
                                                    const FunctionDecl *Caller,
                                                    const FunctionDecl *Callee,
                                                    const CallArgList &Args,
                                                    QualType ReturnType) const {
  checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
  checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args, ReturnType);
}
bool AArch64TargetCodeGenInfo::wouldInliningViolateFunctionCallABI(
    const FunctionDecl *Caller, const FunctionDecl *Callee) const {
  return Caller && Callee &&
         GetArmSMEInlinability(Caller, Callee) != ArmSMEInlinability::Ok;
}
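
// Mangling for function multiversioning: target_clones variants get a suffix
// built from the sorted, uniqued FMV feature names, each prefixed with 'M';
// the default variant keeps the plain ".default" suffix.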
void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                             unsigned Index,
                                             raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}

void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
                                             raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << "._";
  SmallVector<StringRef, 8> Features;
  AttrStr.split(Features, "+");
  for (auto &Feat : Features)
    Feat = Feat.trim();

  llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
    return LHS.compare(RHS) < 0;
  });

  llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
  for (auto &Feat : Features)
    if (auto Ext = llvm::AArch64::parseFMVExtension(Feat))
      if (UniqueFeats.insert(Ext->Name).second)
        Out << 'M' << Ext->Name;
}
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM, Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM, K);
}