#include "TargetInfo.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
class AArch64ABIInfo : public ABIInfo {
  AArch64ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : ABIInfo(CGT), Kind(Kind) {}
  bool isSoftFloat() const { return Kind == AArch64ABIKind::AAPCSSoft; }
  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }
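
  // Note: throughout this file, NSRN and NPRN are the running counts of
  // SIMD/floating-point registers and SVE predicate registers already
  // allocated to earlier arguments, mirroring the AAPCS64 allocation rules
  // (at most 8 vector registers and 4 predicate registers are used for
  // argument passing).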
  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadicFn) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                  bool IsNamedArg, unsigned CallingConvention,
                                  unsigned &NSRN, unsigned &NPRN) const;
  llvm::Type *convertFixedToScalableVectorType(const VectorType *VT) const;
  ABIArgInfo coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                 unsigned &NPRN) const;
  ABIArgInfo coerceAndExpandPureScalableAggregate(
      QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
      const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq,
      unsigned &NSRN, unsigned &NPRN) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isIllegalVectorType(QualType Ty) const;

  bool passAsAggregateType(QualType Ty) const;
  bool passAsPureScalableType(QualType Ty, unsigned &NV, unsigned &NP,
                              SmallVectorImpl<llvm::Type *> &CoerceToSeq) const;

  void flattenType(llvm::Type *Ty,
                   SmallVectorImpl<llvm::Type *> &Flattened) const;
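
  // computeInfo classifies the return type first and then each argument in
  // order, threading the NSRN/NPRN counts through so that later arguments see
  // how many registers the earlier ones consumed.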
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    unsigned ArgNo = 0;
    unsigned NSRN = 0, NPRN = 0;
    for (auto &it : FI.arguments()) {
      const bool IsNamedArg =
          ArgNo < FI.getRequiredArgs().getNumRequiredArgs();
      ++ArgNo;
      it.info = classifyArgumentType(it.type, FI.isVariadic(), IsNamedArg,
                                     FI.getCallingConvention(), NSRN, NPRN);
    }
  }
  RValue EmitDarwinVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                         AggValueSlot Slot) const;
  RValue EmitAAPCSVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                        AArch64ABIKind Kind, AggValueSlot Slot) const;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64
               ? EmitMSVAArg(CGF, VAListAddr, Ty, Slot)
           : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF, Slot)
                           : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind, Slot);
  }

  RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                     AggValueSlot Slot) const override;
  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }

  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
};
class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    auto *Fn = dyn_cast<llvm::Function>(GV);
    if (!Fn)
      return;

    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());

    if (FD && FD->hasAttr<TargetAttr>()) {
      const auto *TA = FD->getAttr<TargetAttr>();
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        StringRef Error;
        (void)CGM.getTarget().validateBranchProtection(
            Attr.BranchProtection, Attr.CPU, BPI, CGM.getLangOpts(), Error);
        assert(Error.empty());
      }
    }
    setBranchProtectionFnAttributes(BPI, *Fn);
  }
  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
  }
  void checkFunctionABI(CodeGenModule &CGM,
                        const FunctionDecl *Decl) const override;

  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee,
                            const CallArgList &Args,
                            QualType ReturnType) const override;

  bool wouldInliningViolateFunctionCallABI(
      const FunctionDecl *Caller, const FunctionDecl *Callee) const override;

private:
  // Diagnose calls between functions with incompatible Streaming SVE
  // attributes.
  void checkFunctionCallABIStreaming(CodeGenModule &CGM, SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee) const;
  // Diagnose calls which must pass arguments in floating-point registers when
  // the selected ABI does not allow that.
  void checkFunctionCallABISoftFloat(CodeGenModule &CGM, SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee,
                                     const CallArgList &Args,
                                     QualType ReturnType) const;
};
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
llvm::Type *
AArch64ABIInfo::convertFixedToScalableVectorType(const VectorType *VT) const {
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return llvm::ScalableVectorType::get(llvm::Type::getInt1Ty(getVMContext()),
                                         16);
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");

    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::MFloat8:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);

    case BuiltinType::Short:
    case BuiltinType::UShort:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);

    case BuiltinType::Int:
    case BuiltinType::UInt:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);

    case BuiltinType::Long:
    case BuiltinType::ULong:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);

    case BuiltinType::Half:
      return llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);

    case BuiltinType::Float:
      return llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);

    case BuiltinType::Double:
      return llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);

    case BuiltinType::BFloat16:
      return llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
    }
  }

  llvm_unreachable("expected fixed-length SVE vector");
}
ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                               unsigned &NPRN) const {
  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    NPRN = std::min(NPRN + 1, 4u);
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    NSRN = std::min(NSRN + 1, 8u);
    return ABIArgInfo::getDirect(convertFixedToScalableVectorType(VT));
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  // Android and OHOS promote <2 x i8> to i16, not i32.
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    NSRN = std::min(NSRN + 1, 8u);
    llvm::Type *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    NSRN = std::min(NSRN + 1, 8u);
    llvm::Type *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }

  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                 /*ByVal=*/false);
}
ABIArgInfo AArch64ABIInfo::coerceAndExpandPureScalableAggregate(
    QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
    const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
    unsigned &NPRN) const {
  if (!IsNamedArg || NSRN + NVec > 8 || NPRN + NPred > 4)
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/false);
  NSRN += NVec;
  NPRN += NPred;

  llvm::Type *UnpaddedCoerceToType =
      UnpaddedCoerceToSeq.size() == 1
          ? UnpaddedCoerceToSeq[0]
          : llvm::StructType::get(CGT.getLLVMContext(), UnpaddedCoerceToSeq,
                                  true);

  SmallVector<llvm::Type *> CoerceToSeq;
  flattenType(CGT.ConvertType(Ty), CoerceToSeq);
  auto *CoerceToType =
      llvm::StructType::get(CGT.getLLVMContext(), CoerceToSeq, false);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
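
// Argument classification proceeds roughly in AAPCS64 order: illegal vectors
// are coerced first, then scalar/builtin types, then aggregates (non-trivial
// C++ classes indirectly, homogeneous aggregates and pure scalable types in
// registers, small aggregates directly, and everything else indirectly).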
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                                bool IsNamedArg,
                                                unsigned CallingConvention,
                                                unsigned &NSRN,
                                                unsigned &NPRN) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty, NSRN, NPRN);

  if (!passAsAggregateType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const auto *ED = Ty->getAsEnumDecl())
      Ty = ED->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                       /*ByVal=*/false);

    if (Ty->isVectorType())
      NSRN = std::min(NSRN + 1, 8u);
    else if (const auto *BT = Ty->getAs<BuiltinType>()) {
      if (BT->isFloatingPoint())
        NSRN = std::min(NSRN + 1, 8u);
      else {
        switch (BT->getKind()) {
        case BuiltinType::SveBool:
        case BuiltinType::SveCount:
          NPRN = std::min(NPRN + 1, 4u);
          break;
        case BuiltinType::SveBoolx2:
          NPRN = std::min(NPRN + 2, 4u);
          break;
        case BuiltinType::SveBoolx4:
          NPRN = std::min(NPRN + 4, 4u);
          break;
        default:
          if (BT->isSVESizelessBuiltinType())
            NSRN = std::min(
                NSRN + getContext().getBuiltinVectorTypeInfo(BT).NumVectors,
                8u);
        }
      }
    }

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial copy
  // constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(
        Ty, getDataLayout().getAllocaAddrSpace(),
        /*ByVal=*/RAA == CGCXXABI::RAA_DirectInMemory);

  // Empty records are ignored in C++ mode (outside the Darwin PCS); otherwise
  // they are passed as if they occupied one byte.
  if (isEmptyRecord(getContext(), Ty, true)) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point/Vector Aggregates (HFAs/HVAs) are passed in
  // registers, except in Windows variadic calls where composites get no
  // special treatment.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadicFn;
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    NSRN = std::min(NSRN + Members, uint64_t(8));
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the argument alignment at 16 bytes.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, /*CanBeFlattened=*/true, Align);
  }

  // In AAPCS, named arguments of a Pure Scalable Type are passed expanded in
  // registers, or indirectly if there are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(Ty, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          Ty, IsNamedArg, NVec, NPred, UnpaddedCoerceToSeq, NSRN, NPRN);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 128) {
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // If the aggregate is 64 or 128 bits and consists only of 64-bit pointers
    // (and references), pass it as one or two pointers so that pointer
    // authentication semantics are preserved.
    auto ContainsOnlyPointers = [&](const auto &Self, QualType Ty) {
      const RecordDecl *RD = Ty->getAsRecordDecl();
      if (!RD)
        return false;
      if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        for (const auto &I : CXXRD->bases())
          if (!Self(Self, I.getType()))
            return false;
      }
      return all_of(RD->fields(), [&](FieldDecl *FD) {
        QualType FDTy = FD->getType();
        if (FDTy->isArrayType())
          FDTy = getContext().getBaseElementType(FDTy);
        return (FDTy->isPointerOrReferenceType() &&
                getContext().getTypeSize(FDTy) == 64 &&
                !FDTy->getPointeeType().hasAddressSpace()) ||
               Self(Self, FDTy);
      });
    };

    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    if ((Size == 64 || Size == 128) && Alignment == 64 &&
        ContainsOnlyPointers(ContainsOnlyPointers, Ty))
      BaseTy = llvm::PointerType::getUnqual(getVMContext());
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                 /*ByVal=*/false);
}
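
// Return-value classification mirrors the argument rules, except that there
// is no register budget carried across values: fresh NSRN/NPRN counts of zero
// are used wherever the helpers require them.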
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadicFn) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      unsigned NSRN = 0, NPRN = 0;
      return coerceIllegalVector(RetTy, NSRN, NPRN);
    }
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy, getDataLayout().getAllocaAddrSpace());

  if (!passAsAggregateType(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const auto *ED = RetTy->getAsEnumDecl())
      RetTy = ED->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy,
                                       getDataLayout().getAllocaAddrSpace());

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  // ...

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadicFn))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // In AAPCS, a returned Pure Scalable Type is treated like a single named
  // argument: expanded into registers, or returned indirectly if the
  // registers do not suffice.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NSRN = 0, NPRN = 0;
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(RetTy, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          RetTy, /*IsNamedArg=*/true, NVec, NPred, UnpaddedCoerceToSeq, NSRN,
          NPRN);
  }

  // Small aggregates are returned directly in integer registers.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 128) {
    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in the lower bits of a 64-bit register.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to a multiple of 8 bytes

    // A pair of i64 is used for a 16-byte aggregate with 8-byte alignment;
    // 16-byte-aligned aggregates use i128 instead.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy, getDataLayout().getAllocaAddrSpace());
}
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Fixed-length SVE vectors and predicates must be coerced to scalable
    // types for argument passing.
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return true;

    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 follows the 32-bit ARM rules here, which allow large vectors.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}
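
// Homogeneous aggregates (HFAs/HVAs) follow the AAPCS64 definition: up to
// four members of a single floating-point or 64/128-bit short-vector base
// type; fixed-length SVE vectors are explicitly excluded.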
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // For the soft-float ABI variant, no types are considered to be homogeneous
  // aggregates.
  if (isSoftFloat())
    return false;

  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (auto Kind = VT->getVectorKind();
        Kind == VectorKind::SveFixedLengthData ||
        Kind == VectorKind::SveFixedLengthPredicate)
      return false;

    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}
bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}
bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  return true;
}
bool AArch64ABIInfo::passAsAggregateType(QualType Ty) const {
  if (Kind == AArch64ABIKind::AAPCS && Ty->isSVESizelessBuiltinType()) {
    const auto *BT = Ty->castAs<BuiltinType>();
    return !BT->isSVECount() &&
           getContext().getBuiltinVectorTypeInfo(BT).NumVectors > 1;
  }
  return isAggregateTypeForABI(Ty);
}
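
// A Pure Scalable Type (PST) in AAPCS64 is an aggregate built solely from SVE
// vectors and predicates, possibly nested in arrays and structs.
// passAsPureScalableType walks the type, counts the vectors and predicates it
// contains, and builds the sequence of scalable LLVM types used to pass it in
// registers.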
bool AArch64ABIInfo::passAsPureScalableType(
    QualType Ty, unsigned &NVec, unsigned &NPred,
    SmallVectorImpl<llvm::Type *> &CoerceToSeq) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElt = AT->getZExtSize();
    if (NElt == 0)
      return false;

    unsigned NV = 0, NP = 0;
    SmallVector<llvm::Type *> EltCoerceToSeq;
    if (!passAsPureScalableType(AT->getElementType(), NV, NP, EltCoerceToSeq))
      return false;

    if (CoerceToSeq.size() + NElt * EltCoerceToSeq.size() > 12)
      return false;

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::append_range(CoerceToSeq, EltCoerceToSeq);

    NVec += NElt * NV;
    NPred += NElt * NP;
    return true;
  }

  if (const auto *RT = Ty->getAs<RecordType>()) {
    // ...
    const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
    // ...

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;
        if (!passAsPureScalableType(I.getType(), NVec, NPred, CoerceToSeq))
          return false;
      }
    }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      QualType FT = FD->getType();
      if (isEmptyField(getContext(), FD, /*AllowArrays=*/true))
        continue;
      if (!passAsPureScalableType(FT, NVec, NPred, CoerceToSeq))
        return false;
    }

    return true;
  }

  if (const auto *VT = Ty->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      ++NPred;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
      ++NVec;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    return false;
  }

  if (!Ty->isBuiltinType())
    return false;

  bool isPredicate;
  switch (Ty->castAs<BuiltinType>()->getKind()) {
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    isPredicate = false;                                                       \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:                                                        \
    isPredicate = true;                                                        \
    break;
#include "clang/Basic/AArch64ACLETypes.def"
  default:
    return false;
  }

  ASTContext::BuiltinVectorTypeInfo Info =
      getContext().getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
  assert(Info.NumVectors > 0 && Info.NumVectors <= 4 &&
         "Expected 1, 2, 3 or 4 vectors!");
  if (isPredicate)
    NPred += Info.NumVectors;
  else
    NVec += Info.NumVectors;
  llvm::Type *EltTy = Info.ElementType->isMFloat8Type()
                          ? llvm::Type::getInt8Ty(getVMContext())
                          : CGT.ConvertType(Info.ElementType);
  auto *VTy = llvm::ScalableVectorType::get(EltTy, Info.EC.getKnownMinValue());

  if (CoerceToSeq.size() + Info.NumVectors > 12)
    return false;
  std::fill_n(std::back_inserter(CoerceToSeq), Info.NumVectors, VTy);

  return true;
}
void AArch64ABIInfo::flattenType(
    llvm::Type *Ty, SmallVectorImpl<llvm::Type *> &Flattened) const {
  if (ABIArgInfo::isPaddingForCoerceAndExpand(Ty)) {
    Flattened.push_back(Ty);
    return;
  }

  if (const auto *AT = dyn_cast<llvm::ArrayType>(Ty)) {
    uint64_t NElt = AT->getNumElements();
    if (NElt == 0)
      return;

    SmallVector<llvm::Type *> EltFlattened;
    flattenType(AT->getElementType(), EltFlattened);

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::append_range(Flattened, EltFlattened);
    return;
  }

  if (const auto *ST = dyn_cast<llvm::StructType>(Ty)) {
    for (auto *ET : ST->elements())
      flattenType(ET, Flattened);
    return;
  }

  Flattened.push_back(Ty);
}
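
// EmitAAPCSVAArg lowers va_arg against the AAPCS64 va_list layout
// (__stack, __gr_top, __vr_top, __gr_offs, __vr_offs): it first tries to take
// the argument from the saved general-purpose or SIMD/FP register area and
// falls back to the stack area otherwise.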
RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF,
                                      AArch64ABIKind Kind,
                                      AggValueSlot Slot) const {
  // Classify the argument the same way a call would, to find out whether it
  // lives in general-purpose or SIMD/floating-point registers.
  unsigned NSRN = 0, NPRN = 0;
  ABIArgInfo AI =
      classifyArgumentType(Ty, /*IsVariadicFn=*/true, /*IsNamedArg=*/false,
                           CGF.CurFnInfo->getCallingConvention(), NSRN, NPRN);
  // ...
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy->getContext());
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR =
      !isSoftFloat() && (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  // Load __gr_offs or __vr_offs and work out how much register space the
  // argument needs.
  llvm::Value *reg_offs = nullptr;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // ... __gr_offs; general-purpose registers are 8 bytes each ...
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // ... __vr_offs; SIMD/FP registers occupy 16-byte slots ...
    RegSize = 16 * NumRegs;
  }

  // If reg_offs >= 0 we're already using the stack for this type of argument.
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers;
  // the question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in an even x-register pair), in
  // which case reg_offs is rounded up to the type's alignment.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the offset that the next va_arg in this function will see.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  // ...

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  // Argument was in registers.
  CGF.EmitBlock(InRegBlock);
  llvm::Value *reg_top = nullptr;
  // ... load __gr_top/__vr_top and compute BaseAddr = reg_top + reg_offs ...

  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
  if (IsIndirect)
    MemTy = llvm::PointerType::getUnqual(MemTy->getContext());

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers have their members stored
    // one 16-byte register slot apart; gather them into a temporary.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp =
        CGF.CreateTempAlloca(HFATy, std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms each member is right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      // ... copy member i from its register slot into Tmp ...
    }
    // ...
  }

  // Otherwise the value lives directly in the register save area; on
  // big-endian targets small values are right-adjusted within their slot.
  CharUnits SlotSize = BaseAddr.getAlignment();
  if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
      TySize < SlotSize) {
    CharUnits Offset = SlotSize - TySize;
    BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
  }
  // ...

  // Argument was on the stack.
  CGF.EmitBlock(OnStackBlock);
  // ... load __stack, align it if required ...
  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // Advance __stack past the value for the next va_arg.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
  // ... store NewStack back into __stack ...

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }
  // ...

  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");
  // ...
}
RValue AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF,
                                       AggValueSlot Slot) const {
  // ...
  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
  // ...

  // The size of the actual thing passed, which might end up just being a
  // pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous aggregates are
  // passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign=*/true, Slot);
}
RValue AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  bool IsIndirect = false;

  // Composites larger than 16 bytes are passed by reference.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
    IsIndirect = true;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*AllowHigherAlign=*/false, Slot);
}
static bool isStreamingCompatible(const FunctionDecl *F) {
  if (const auto *T = F->getType()->getAs<FunctionProtoType>())
    return T->getAArch64SMEAttributes() &
           FunctionType::SME_PStateSMCompatibleMask;
  return false;
}

// Report an error if an argument or return value of type Ty would need to be
// passed in a floating-point register.
static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags,
                                 const StringRef ABIName,
                                 const AArch64ABIInfo &ABIInfo,
                                 const QualType &Ty, const NamedDecl *D,
                                 SourceLocation loc) {
  const Type *HABase = nullptr;
  uint64_t HAMembers = 0;
  if (Ty->isFloatingType() || Ty->isVectorType() ||
      ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
    Diags.Report(loc, diag::err_target_unsupported_type_for_abi)
        << D->getDeclName() << Ty << ABIName;
  }
}
void AArch64TargetCodeGenInfo::checkFunctionABI(
    CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  // If we are using a hard-float ABI, but do not have floating point
  // registers, then report an error for any function arguments or returns
  // which would be passed in floating-point registers.
  if (!TI.hasFeature("fp") && !ABIInfo.isSoftFloat()) {
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo,
                         FuncDecl->getReturnType(), FuncDecl,
                         FuncDecl->getLocation());
    for (ParmVarDecl *PVD : FuncDecl->parameters()) {
      diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, PVD->getType(),
                           PVD, FuncDecl->getLocation());
    }
  }
}
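
// The SME checks below flag always_inline call sites where inlining would
// change the streaming mode or require new ZA/ZT0 state, which the inliner
// cannot honour; GetArmSMEInlinability summarises the problems as a bitmask.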
/// Determines if there are any Arm SME ABI issues with inlining \p Callee into
/// \p Caller.
static ArmSMEInlinability GetArmSMEInlinability(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee) {
  bool CallerIsStreaming =
      IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
  bool CalleeIsStreaming =
      IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
  bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
  bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);

  ArmSMEInlinability Inlinability = ArmSMEInlinability::Ok;

  if (!CalleeIsStreamingCompatible &&
      (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible)) {
    if (CalleeIsStreaming)
      Inlinability |= ArmSMEInlinability::ErrorIncompatibleStreamingModes;
    else
      Inlinability |= ArmSMEInlinability::WarnIncompatibleStreamingModes;
  }
  if (auto *NewAttr = Callee->getAttr<ArmNewAttr>()) {
    if (NewAttr->isNewZA())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZA;
    if (NewAttr->isNewZT0())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZT0;
  }

  return Inlinability;
}
void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee) const {
  if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
    return;

  ArmSMEInlinability Inlinability = GetArmSMEInlinability(Caller, Callee);

  if ((Inlinability & ArmSMEInlinability::IncompatibleStreamingModes) !=
      ArmSMEInlinability::Ok)
    CGM.getDiags().Report(
        CallLoc,
        (Inlinability & ArmSMEInlinability::ErrorIncompatibleStreamingModes) ==
                ArmSMEInlinability::ErrorIncompatibleStreamingModes
            ? diag::err_function_always_inline_attribute_mismatch
            : diag::warn_function_always_inline_attribute_mismatch)
        << Caller->getDeclName() << Callee->getDeclName() << "streaming";

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZA) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZA)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
        << Callee->getDeclName();

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZT0) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZT0)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_zt0)
        << Callee->getDeclName();
}
void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!Caller || TI.hasFeature("fp") || ABIInfo.isSoftFloat())
    return;

  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
                       Callee ? Callee : Caller, CallLoc);

  for (const CallArg &Arg : Args)
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
                         Callee ? Callee : Caller, CallLoc);
}
void AArch64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
                                                    SourceLocation CallLoc,
                                                    const FunctionDecl *Caller,
                                                    const FunctionDecl *Callee,
                                                    const CallArgList &Args,
                                                    QualType ReturnType) const {
  checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
  checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args, ReturnType);
}
bool AArch64TargetCodeGenInfo::wouldInliningViolateFunctionCallABI(
    const FunctionDecl *Caller, const FunctionDecl *Callee) const {
  return Caller && Callee &&
         GetArmSMEInlinability(Caller, Callee) != ArmSMEInlinability::Ok;
}
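
// Function multi-versioning mangling: each target_clones/target_version
// variant gets its feature list appended as "._M<feature>..." with the
// feature names normalised, sorted and de-duplicated, so that the mangled
// name is canonical regardless of how the attribute was written.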
void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                             unsigned Index,
                                             raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}

void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
                                             raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << "._";
  SmallVector<StringRef, 8> Features;
  AttrStr.split(Features, "+");
  for (auto &Feat : Features)
    Feat = Feat.trim();

  llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
    return LHS.compare(RHS) < 0;
  });

  llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
  for (auto &Feat : Features)
    if (auto Ext = llvm::AArch64::parseFMVExtension(Feat))
      if (UniqueFeats.insert(Ext->Name).second)
        Out << 'M' << Ext->Name;
}
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
}