#include "TargetInfo.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
class AArch64ABIInfo : public ABIInfo {
  AArch64ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : ABIInfo(CGT), Kind(Kind) {}

  bool isSoftFloat() const { return Kind == AArch64ABIKind::AAPCSSoft; }
  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                  bool IsNamedArg, unsigned CallingConvention,
                                  unsigned &NSRN, unsigned &NPRN) const;
  llvm::Type *convertFixedToScalableVectorType(const VectorType *VT) const;
  ABIArgInfo coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                 unsigned &NPRN) const;
  ABIArgInfo coerceAndExpandPureScalableAggregate(
      QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
      const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
      unsigned &NPRN) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isIllegalVectorType(QualType Ty) const;

  bool passAsAggregateType(QualType Ty) const;
  bool passAsPureScalableType(QualType Ty, unsigned &NV, unsigned &NP,
                              SmallVectorImpl<llvm::Type *> &CoerceToSeq) const;

  void flattenType(llvm::Type *Ty,
                   SmallVectorImpl<llvm::Type *> &Flattened) const;
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    unsigned ArgNo = 0;
    unsigned NSRN = 0, NPRN = 0;
    for (auto &it : FI.arguments()) {
      const bool IsNamedArg =
          ArgNo < FI.getRequiredArgs().getNumRequiredArgs();
      ++ArgNo;
      it.info = classifyArgumentType(it.type, FI.isVariadic(), IsNamedArg,
                                     FI.getCallingConvention(), NSRN, NPRN);
    }
  }

  RValue EmitDarwinVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                         AggValueSlot Slot) const;
  RValue EmitAAPCSVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                        AArch64ABIKind Kind, AggValueSlot Slot) const;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64
               ? EmitMSVAArg(CGF, VAListAddr, Ty, Slot)
               : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF, Slot)
                               : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind, Slot);
  }

  RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                     AggValueSlot Slot) const override;

  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }

  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
};

class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    auto *Fn = dyn_cast<llvm::Function>(GV);
    if (!Fn)
      return;

    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());

    if (FD && FD->hasAttr<TargetAttr>()) {
      const auto *TA = FD->getAttr<TargetAttr>();
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        StringRef Error;
        (void)CGM.getTarget().validateBranchProtection(
            Attr.BranchProtection, Attr.CPU, BPI, CGM.getLangOpts(), Error);
        assert(Error.empty());
      }
    }
    setBranchProtectionFnAttributes(BPI, *Fn);
  }
  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
  }
  void checkFunctionABI(CodeGenModule &CGM,
                        const FunctionDecl *Decl) const override;

  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee, const CallArgList &Args,
                            QualType ReturnType) const override;

  bool wouldInliningViolateFunctionCallABI(
      const FunctionDecl *Caller, const FunctionDecl *Callee) const override;

  void checkFunctionCallABIStreaming(CodeGenModule &CGM, SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee) const;

  void checkFunctionCallABISoftFloat(CodeGenModule &CGM, SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee,
                                     const CallArgList &Args,
                                     QualType ReturnType) const;
};

class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
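
// Map a fixed-length SVE vector type (e.g. one declared with
// __attribute__((arm_sve_vector_bits(N)))) to the corresponding scalable LLVM
// type: predicates become <vscale x 16 x i1>, data vectors become a scalable
// vector whose element count fills one 128-bit granule.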
llvm::Type *
AArch64ABIInfo::convertFixedToScalableVectorType(const VectorType *VT) const {
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return llvm::ScalableVectorType::get(llvm::Type::getInt1Ty(getVMContext()),
                                         16);
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::MFloat8:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);
    case BuiltinType::Short:
    case BuiltinType::UShort:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
    case BuiltinType::Long:
    case BuiltinType::ULong:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);
    case BuiltinType::Half:
      return llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);
    case BuiltinType::Float:
      return llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);
    case BuiltinType::Double:
      return llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);
    case BuiltinType::BFloat16:
      return llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
    }
  }

  llvm_unreachable("expected fixed-length SVE vector");
}
ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                               unsigned &NPRN) const {
  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    NPRN = std::min(NPRN + 1, 4u);
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    NSRN = std::min(NSRN + 1, 8u);
    return ABIArgInfo::getDirect(convertFixedToScalableVectorType(VT));
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    NSRN = std::min(NSRN + 1, 8u);
    llvm::Type *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    NSRN = std::min(NSRN + 1, 8u);
    llvm::Type *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }

  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                 /*ByVal=*/false);
}
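
// Pass a Pure Scalable Type (an aggregate consisting only of SVE vectors and
// predicates) expanded into registers when it is a named argument and enough
// Z/P registers remain; otherwise pass it indirectly.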
ABIArgInfo AArch64ABIInfo::coerceAndExpandPureScalableAggregate(
    QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
    const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
    unsigned &NPRN) const {
  if (!IsNamedArg || NSRN + NVec > 8 || NPRN + NPred > 4)
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/false);
  NSRN += NVec;
  NPRN += NPred;

  llvm::Type *UnpaddedCoerceToType =
      UnpaddedCoerceToSeq.size() == 1
          ? UnpaddedCoerceToSeq[0]
          : llvm::StructType::get(CGT.getLLVMContext(), UnpaddedCoerceToSeq,
                                  true);

  SmallVector<llvm::Type *> CoerceToSeq;
  flattenType(CGT.ConvertType(Ty), CoerceToSeq);
  auto *CoerceToType =
      llvm::StructType::get(CGT.getLLVMContext(), CoerceToSeq, false);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
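
// Classify how a single argument is passed under the selected AArch64 ABI:
// illegal vectors are coerced, scalar types consume SIMD/predicate registers
// as needed, homogeneous aggregates and small composites are passed directly,
// and everything else is passed indirectly.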
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                                bool IsNamedArg,
                                                unsigned CallingConvention,
                                                unsigned &NSRN,
                                                unsigned &NPRN) const {
  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty, NSRN, NPRN);

  if (!passAsAggregateType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumDecl *ED = Ty->getAsEnumDecl())
      Ty = ED->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                       false);

    if (Ty->isVectorType())
      NSRN = std::min(NSRN + 1, 8u);
    else if (const auto *BT = Ty->getAs<BuiltinType>()) {
      if (BT->isFloatingPoint())
        NSRN = std::min(NSRN + 1, 8u);
      else {
        switch (BT->getKind()) {
        case BuiltinType::SveBool:
        case BuiltinType::SveCount:
          NPRN = std::min(NPRN + 1, 4u);
          break;
        case BuiltinType::SveBoolx2:
          NPRN = std::min(NPRN + 2, 4u);
          break;
        case BuiltinType::SveBoolx4:
          NPRN = std::min(NPRN + 4, 4u);
          break;
        default:
          if (BT->isSVESizelessBuiltinType())
            NSRN = std::min(
                NSRN + getContext().getBuiltinVectorTypeInfo(BT).NumVectors,
                8u);
        }
      }
    }

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty, CGT.ConvertType(Ty))
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial copy
  // constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(
        Ty, getDataLayout().getAllocaAddrSpace(),
        /*ByVal=*/RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records:
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (!Ty->isSVESizelessBuiltinType() && (IsEmpty || Size == 0)) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();
    // ...
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadicFn;
  // In variadic functions on Windows, all composite types are treated alike;
  // there is no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    NSRN = std::min(NSRN + Members, uint64_t(8));
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the alignment at 16, otherwise keep the default.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // In AAPCS named arguments of a Pure Scalable Type are passed expanded in
  // registers, or indirectly if there are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(Ty, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          Ty, IsNamedArg, NVec, NPred, UnpaddedCoerceToSeq, NSRN, NPRN);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // If the aggregate consists only of 64-bit pointers, coerce to an array of
    // pointers rather than integers (relevant under pointer authentication).
    auto ContainsOnlyPointers = [&](const auto &Self, QualType Ty) {
      if (isEmptyRecord(getContext(), Ty, true))
        return false;
      const RecordType *RT = Ty->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        for (const auto &I : CXXRD->bases())
          if (!Self(Self, I.getType()))
            return false;
      }
      return all_of(RD->fields(), [&](FieldDecl *FD) {
        QualType FDTy = FD->getType();
        if (FDTy->isArrayType())
          FDTy = getContext().getBaseElementType(FDTy);
        return (FDTy->isPointerOrReferenceType() &&
                getContext().getTypeSize(FDTy) == 64 &&
                !FDTy->getPointeeType().hasAddressSpace()) ||
               Self(Self, FDTy);
      });
    };

    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    if ((Size == 64 || Size == 128) && Alignment == 64 &&
        ContainsOnlyPointers(ContainsOnlyPointers, Ty))
      BaseTy = llvm::PointerType::getUnqual(getVMContext());
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                 /*ByVal=*/false);
}
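
// Classify how a value is returned: illegal vectors are coerced, scalars are
// returned directly (with extension under Darwin PCS), homogeneous aggregates
// and small composites come back in registers, and larger types are returned
// indirectly.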
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadicFn) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      unsigned NSRN = 0, NPRN = 0;
      return coerceIllegalVector(RetTy, NSRN, NPRN);
    }
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy, getDataLayout().getAllocaAddrSpace());

  if (!passAsAggregateType(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumDecl *ED = RetTy->getAsEnumDecl())
      RetTy = ED->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy,
                                       getDataLayout().getAllocaAddrSpace());

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  // ...

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadicFn))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // In AAPCS a return value of Pure Scalable Type is treated as a single named
  // argument and passed expanded in registers, or indirectly if there are not
  // enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NSRN = 0, NPRN = 0;
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(RetTy, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          RetTy, true, NVec, NPred, UnpaddedCoerceToSeq, NSRN, NPRN);
  }

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in the lower bits of a 64-bit register on
      // little-endian targets.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to a multiple of 8 bytes

    // A 16-byte aggregate with 8-byte alignment is returned as a pair of i64;
    // with 16-byte alignment it is returned as a single i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy, getDataLayout().getAllocaAddrSpace());
}
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // ... fixed-length SVE vectors always count as "illegal" here so they get
    // coerced; otherwise check the element count and overall size:
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 on Mach-O follows the 32-bit ARM rules here.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}

bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  // ...
}
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // ...
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (auto Kind = VT->getVectorKind();
        Kind == VectorKind::SveFixedLengthData ||
        Kind == VectorKind::SveFixedLengthPredicate)
      return false;

    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  return true;
}
bool AArch64ABIInfo::passAsAggregateType(QualType Ty) const {
  if (Kind == AArch64ABIKind::AAPCS && Ty->isSVESizelessBuiltinType()) {
    const auto *BT = Ty->castAs<BuiltinType>();
    return !BT->isSVECount() &&
           getContext().getBuiltinVectorTypeInfo(BT).NumVectors > 1;
  }
  return isAggregateTypeForABI(Ty);
}
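
// Determine whether a type is a Pure Scalable Type in the AAPCS64 sense: an
// aggregate built only from SVE vectors and predicates, possibly nested in
// arrays or structs. For example (illustrative only):
//   struct PST { svbool_t P; svfloat32_t V[2]; };
// On success NVec/NPred count the data vectors and predicates, and CoerceToSeq
// receives the flattened sequence of scalable LLVM types used to pass it.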
bool AArch64ABIInfo::passAsPureScalableType(
    QualType Ty, unsigned &NVec, unsigned &NPred,
    SmallVectorImpl<llvm::Type *> &CoerceToSeq) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElt = AT->getZExtSize();
    if (NElt == 0)
      return false;

    unsigned NV = 0, NP = 0;
    SmallVector<llvm::Type *> EltCoerceToSeq;
    if (!passAsPureScalableType(AT->getElementType(), NV, NP, EltCoerceToSeq))
      return false;

    if (CoerceToSeq.size() + NElt * EltCoerceToSeq.size() > 12)
      return false;

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::append_range(CoerceToSeq, EltCoerceToSeq);

    NVec += NElt * NV;
    NPred += NElt * NP;
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // ... records that must be passed in memory, and unions, are not PSTs ...
    const RecordDecl *RD = RT->getDecl()->getDefinitionOrSelf();

    // If this is a C++ record, check the bases.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;
        if (!passAsPureScalableType(I.getType(), NVec, NPred, CoerceToSeq))
          return false;
      }
    }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      QualType FT = FD->getType();
      if (isEmptyField(getContext(), FD, /*AllowArrays=*/true))
        continue;
      if (!passAsPureScalableType(FT, NVec, NPred, CoerceToSeq))
        return false;
    }

    return true;
  }

  if (const auto *VT = Ty->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      ++NPred;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
      ++NVec;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    return false;
  }

  if (!Ty->isBuiltinType())
    return false;

  bool isPredicate;
  switch (Ty->castAs<BuiltinType>()->getKind()) {
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    isPredicate = false;                                                       \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:                                                        \
    isPredicate = true;                                                        \
    break;
#include "clang/Basic/AArch64ACLETypes.def"
  default:
    return false;
  }

  ASTContext::BuiltinVectorTypeInfo Info =
      getContext().getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
  assert(Info.NumVectors > 0 && Info.NumVectors <= 4 &&
         "Expected 1, 2, 3 or 4 vectors!");
  if (isPredicate)
    NPred += Info.NumVectors;
  else
    NVec += Info.NumVectors;
  llvm::Type *EltTy = Info.ElementType->isMFloat8Type()
                          ? llvm::Type::getInt8Ty(getVMContext())
                          : CGT.ConvertType(Info.ElementType);
  auto *VTy =
      llvm::ScalableVectorType::get(EltTy, Info.EC.getKnownMinValue());

  if (CoerceToSeq.size() + Info.NumVectors > 12)
    return false;
  std::fill_n(std::back_inserter(CoerceToSeq), Info.NumVectors, VTy);

  return true;
}
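
// Flatten an LLVM type into a sequence of leaf types, expanding arrays element
// by element and structs field by field; used to build the coerce-and-expand
// sequence for Pure Scalable Types.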
void AArch64ABIInfo::flattenType(
    llvm::Type *Ty, SmallVectorImpl<llvm::Type *> &Flattened) const {
  if (ABIArgInfo::isPaddingForCoerceAndExpand(Ty)) {
    Flattened.push_back(Ty);
    return;
  }

  if (const auto *AT = dyn_cast<llvm::ArrayType>(Ty)) {
    uint64_t NElt = AT->getNumElements();
    if (NElt == 0)
      return;

    SmallVector<llvm::Type *> EltFlattened;
    flattenType(AT->getElementType(), EltFlattened);

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::append_range(Flattened, EltFlattened);
    return;
  }

  if (const auto *ST = dyn_cast<llvm::StructType>(Ty)) {
    for (auto *ET : ST->elements())
      flattenType(ET, Flattened);
    return;
  }

  Flattened.push_back(Ty);
}
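
// AAPCS64 va_arg lowering: check whether the argument could still be in the
// register save area recorded in the va_list; if so load it from there,
// otherwise take it from the overflow (stack) area, and merge the two paths
// with a PHI.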
RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF,
                                      AArch64ABIKind Kind,
                                      AggValueSlot Slot) const {
  // These numbers are not used for variadic arguments, hence it doesn't matter
  // that they don't retain their values across calls.
  unsigned NSRN = 0, NPRN = 0;
  ABIArgInfo AI =
      classifyArgumentType(Ty, /*IsVariadicFn=*/true, /*IsNamedArg=*/false,
                           CGF.CurFnInfo->getCallingConvention(), NSRN, NPRN);
  // ...
  bool IsIndirect = AI.isIndirect();
  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy->getContext());
  // ...
  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR =
      !isSoftFloat() && (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());

  // ... create the MaybeRegBlock / InRegBlock / OnStackBlock basic blocks ...

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  // Load the current __gr_offs/__vr_offs and work out how much register space
  // this argument would consume.
  llvm::Value *reg_offs = nullptr;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // ... read __gr_offs; general-purpose registers are 8 bytes each.
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // ... read __vr_offs; FP/SIMD registers are 16 bytes each.
    RegSize = 16 * NumRegs;
  }

  // If reg_offs >= 0 we're already using the stack for this type of argument.
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, some kind of argument could go in these registers; the question
  // is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}), so align
  // __gr_offs before calculating the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();
    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the offset for the next call to va_arg on this va_list.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  // ...

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================
  CGF.EmitBlock(InRegBlock);
  llvm::Value *reg_top = nullptr;
  // ... load __gr_top/__vr_top into reg_top and compute BaseAddr ...

  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
  if (IsIndirect)
    MemTy = llvm::PointerType::getUnqual(MemTy->getContext());

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers have their elements stored
    // 16 bytes apart (notionally in qN, qN+1, ...); reload them and store them
    // contiguously into a temporary.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    // ... allocate a temporary with alignment
    //     std::max(TyAlign, BaseTyInfo.Align) ...

    // On big-endian platforms each element is right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      // ... copy member i from the register save area into the temporary ...
    }
    // ...
  }

  // ...
  CharUnits SlotSize = BaseAddr.getAlignment();
  if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
      TySize < SlotSize) {
    CharUnits Offset = SlotSize - TySize;
    // ... right-adjust the address within the slot ...
  }
  // ...

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);
  // ... load __stack into OnStackPtr ...
  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // Advance __stack past this argument.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
  // ...

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    // ...
  }
  // ...

  //=======================================
  // Tidy up
  //=======================================
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");
  // ...
}
RValue AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF,
                                       AggValueSlot Slot) const {
  // ...
  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
  // ...
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous aggregates are
  // passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign=*/true, Slot);
}

RValue AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  bool IsIndirect = false;
  // ...
}
static bool isStreamingCompatible(const FunctionDecl *F) {
  if (const auto *T = F->getType()->getAs<FunctionProtoType>())
    return T->getAArch64SMEAttributes() &
           FunctionType::SME_PStateSMCompatibleMask;
  return false;
}

// Report an error if an argument or return value of type Ty would need to be
// passed in a floating-point register.
static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags,
                                 const StringRef ABIName,
                                 const AArch64ABIInfo &ABIInfo,
                                 const QualType &Ty, const NamedDecl *D,
                                 SourceLocation loc) {
  const Type *HABase = nullptr;
  uint64_t HAMembers = 0;
  if (Ty->isFloatingType() || Ty->isVectorType() ||
      ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
    Diags.Report(loc, diag::err_target_unsupported_type_for_abi)
        << D->getDeclName() << Ty << ABIName;
  }
}
void AArch64TargetCodeGenInfo::checkFunctionABI(
    CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!TI.hasFeature("fp") && !ABIInfo.isSoftFloat()) {
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo,
                         FuncDecl->getReturnType(), FuncDecl,
                         FuncDecl->getLocation());
    for (ParmVarDecl *PVD : FuncDecl->parameters())
      diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo,
                           PVD->getType(), PVD, FuncDecl->getLocation());
  }
}
// Determines if there are any Arm SME ABI issues with inlining Callee into
// Caller.
static ArmSMEInlinability GetArmSMEInlinability(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee) {
  bool CallerIsStreaming =
      IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
  bool CalleeIsStreaming =
      IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
  bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
  bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);

  ArmSMEInlinability Inlinability = ArmSMEInlinability::Ok;

  if (!CalleeIsStreamingCompatible &&
      (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible)) {
    if (CalleeIsStreaming)
      Inlinability |= ArmSMEInlinability::ErrorIncompatibleStreamingModes;
    else
      Inlinability |= ArmSMEInlinability::WarnIncompatibleStreamingModes;
  }

  if (auto *NewAttr = Callee->getAttr<ArmNewAttr>()) {
    if (NewAttr->isNewZA())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZA;
    if (NewAttr->isNewZT0())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZT0;
  }

  return Inlinability;
}
void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee) const {
  if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
    return;

  ArmSMEInlinability Inlinability = GetArmSMEInlinability(Caller, Callee);

  if ((Inlinability & ArmSMEInlinability::IncompatibleStreamingModes) !=
      ArmSMEInlinability::Ok)
    CGM.getDiags().Report(
        CallLoc,
        (Inlinability & ArmSMEInlinability::ErrorIncompatibleStreamingModes) ==
                ArmSMEInlinability::ErrorIncompatibleStreamingModes
            ? diag::err_function_always_inline_attribute_mismatch
            : diag::warn_function_always_inline_attribute_mismatch)
        << Caller->getDeclName() << Callee->getDeclName() << "streaming";

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZA) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZA)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
        << Callee->getDeclName();

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZT0) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZT0)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_zt0)
        << Callee->getDeclName();
}
void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!Caller || TI.hasFeature("fp") || ABIInfo.isSoftFloat())
    return;

  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
                       Callee ? Callee : Caller, CallLoc);

  for (const CallArg &Arg : Args)
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
                         Callee ? Callee : Caller, CallLoc);
}
void AArch64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
                                                    SourceLocation CallLoc,
                                                    const FunctionDecl *Caller,
                                                    const FunctionDecl *Callee,
                                                    const CallArgList &Args,
                                                    QualType ReturnType) const {
  checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
  checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args, ReturnType);
}
bool AArch64TargetCodeGenInfo::wouldInliningViolateFunctionCallABI(
    const FunctionDecl *Caller, const FunctionDecl *Callee) const {
  return Caller && Callee &&
         GetArmSMEInlinability(Caller, Callee) != ArmSMEInlinability::Ok;
}
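
// Mangling for function multiversioning: a "default" variant gets the
// ".default" suffix, while other variants get "._" followed by the sorted,
// de-duplicated FMV feature names, each prefixed with 'M'.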
void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                             unsigned Index,
                                             raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}

void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
                                             raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << "._";
  SmallVector<StringRef, 8> Features;
  AttrStr.split(Features, "+");
  for (auto &Feat : Features)
    Feat = Feat.trim();

  llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
    return LHS.compare(RHS) < 0;
  });

  llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
  for (auto &Feat : Features)
    if (auto Ext = llvm::AArch64::parseFMVExtension(Feat))
      if (UniqueFeats.insert(Ext->Name).second)
        Out << 'M' << Ext->Name;
}
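
// Factory entry points that construct the target codegen info objects defined
// above for AArch64 and Windows-on-AArch64 targets.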
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
}