#include "TargetInfo.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
// ...

class AArch64ABIInfo : public ABIInfo {
  AArch64ABIKind Kind;
  std::unique_ptr<TargetCodeGenInfo> WinX86_64CodegenInfo;

public:
  AArch64ABIInfo(CodeGenModule &CGM, AArch64ABIKind Kind)
      : ABIInfo(CGM.getTypes()), Kind(Kind) {
    if (getTarget().getTriple().isWindowsArm64EC()) {
      WinX86_64CodegenInfo =
          createWinX86_64TargetCodeGenInfo(CGM, X86AVXABILevel::None);
    }
  }
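  // Note (added for clarity): on Windows Arm64EC, variadic arguments follow
  // the x64 conventions, so an x64 codegen helper is kept around and
  // consulted when classifying variadic arguments below.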
  bool isSoftFloat() const { return Kind == AArch64ABIKind::AAPCSSoft; }

  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }
  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadicFn) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                  bool IsNamedArg, unsigned CallingConvention,
                                  unsigned &NSRN, unsigned &NPRN) const;
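  // NSRN is the AAPCS64 Next SIMD and Floating-point Register Number (v0-v7,
  // so it saturates at 8); NPRN is the Next scalable Predicate Register
  // Number (p0-p3, saturating at 4). Both are threaded through classification
  // so that later arguments know how many registers remain.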
  llvm::Type *convertFixedToScalableVectorType(const VectorType *VT) const;
  ABIArgInfo coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                 unsigned &NPRN) const;
  ABIArgInfo coerceAndExpandPureScalableAggregate(
      QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
      const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
      unsigned &NPRN) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;
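  // The three overrides above implement the AAPCS64 homogeneous aggregate
  // rules: an HFA/HVA is an aggregate whose members all have the same
  // floating-point or short-vector base type, with at most four members.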
  bool isIllegalVectorType(QualType Ty) const;

  bool passAsAggregateType(QualType Ty) const;
  bool passAsPureScalableType(QualType Ty, unsigned &NV, unsigned &NP,
                              SmallVectorImpl<llvm::Type *> &CoerceToSeq) const;
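  // A Pure Scalable Type (PST) in AAPCS64 terms is an aggregate built solely
  // from SVE vectors and predicates. A named PST argument travels entirely in
  // Z/P registers when enough of them remain; otherwise it is passed
  // indirectly like other large aggregates.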
  void flattenType(llvm::Type *Ty,
                   SmallVectorImpl<llvm::Type *> &Flattened) const;
  void computeInfo(CGFunctionInfo &FI) const override {
    // ... (return-type classification elided in this excerpt)
    unsigned ArgNo = 0;
    unsigned NSRN = 0, NPRN = 0;
    for (auto &it : FI.arguments()) {
      const bool IsNamedArg =
          ArgNo++ < FI.getRequiredArgs().getNumRequiredArgs();
      it.info = classifyArgumentType(it.type, FI.isVariadic(), IsNamedArg,
                                     FI.getCallingConvention(), NSRN, NPRN);
    }
  }
  RValue EmitDarwinVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                         AggValueSlot Slot) const;

  RValue EmitAAPCSVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                        AArch64ABIKind Kind, AggValueSlot Slot) const;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64
               ? EmitMSVAArg(CGF, VAListAddr, Ty, Slot)
               : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF, Slot)
                               : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind, Slot);
  }
  RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                     AggValueSlot Slot) const override;

  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }
  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
};

class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGM, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGM.getTypes());
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31; // SP is DWARF register number 31 on AArch64.
  }
  bool doesReturnSlotInterfereWithArgs() const override { return false; }
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    auto *Fn = dyn_cast<llvm::Function>(GV);
    if (!Fn)
      return;

    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());

    if (FD && FD->hasAttr<TargetAttr>()) {
      const auto *TA = FD->getAttr<TargetAttr>();
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        StringRef Error;
        (void)CGM.getTarget().validateBranchProtection(
            Attr.BranchProtection, Attr.CPU, BPI, CGM.getLangOpts(), Error);
        assert(Error.empty());
      }
    }
    setBranchProtectionFnAttributes(BPI, *Fn);
  }
  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
  }
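  // The single-element struct wrapping an [8 x i64] matches the ACLE
  // data512_t type used with the FEAT_LS64 ld64b/st64b instructions, which
  // inline assembly may pass and return by value.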
  void checkFunctionABI(CodeGenModule &CGM,
                        const FunctionDecl *Decl) const override;

  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee, const CallArgList &Args,
                            QualType ReturnType) const override;

  bool wouldInliningViolateFunctionCallABI(
      const FunctionDecl *Caller, const FunctionDecl *Callee) const override;

private:
  // Diagnose calls between functions with incompatible streaming SVE
  // attributes.
  void checkFunctionCallABIStreaming(CodeGenModule &CGM, SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee) const;
  // If the target does not have floating-point registers, but we are using a
  // hard-float ABI, there is no way to pass floating-point, vector or HFA
  // values to functions, so we report an error.
  void checkFunctionCallABISoftFloat(CodeGenModule &CGM, SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee,
                                     const CallArgList &Args,
                                     QualType ReturnType) const;
};
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGM, K) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
llvm::Type *
AArch64ABIInfo::convertFixedToScalableVectorType(const VectorType *VT) const {
  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return llvm::ScalableVectorType::get(llvm::Type::getInt1Ty(getVMContext()),
                                         16);
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");

    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::MFloat8:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);

    case BuiltinType::Short:
    case BuiltinType::UShort:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);

    case BuiltinType::Int:
    case BuiltinType::UInt:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);

    case BuiltinType::Long:
    case BuiltinType::ULong:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);

    case BuiltinType::Half:
      return llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);

    case BuiltinType::Float:
      return llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);

    case BuiltinType::Double:
      return llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);

    case BuiltinType::BFloat16:
      return llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
    }
  }

  llvm_unreachable("expected fixed-length SVE vector");
}
ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                               unsigned &NPRN) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    NPRN = std::min(NPRN + 1, 4u);
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    NSRN = std::min(NSRN + 1, 8u);
    return ABIArgInfo::getDirect(convertFixedToScalableVectorType(VT));
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  // Android promotes <2 x i8> to i16, not i32.
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }

  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                 /*ByVal=*/false);
}
ABIArgInfo AArch64ABIInfo::coerceAndExpandPureScalableAggregate(
    QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
    const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
    unsigned &NPRN) const {
  if (!IsNamedArg || NSRN + NVec > 8 || NPRN + NPred > 4)
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   /*ByVal=*/false);
  NSRN += NVec;
  NPRN += NPred;

  llvm::Type *UnpaddedCoerceToType =
      UnpaddedCoerceToSeq.size() == 1
          ? UnpaddedCoerceToSeq[0]
          : llvm::StructType::get(CGT.getLLVMContext(), UnpaddedCoerceToSeq,
                                  /*isPacked=*/true);

  SmallVector<llvm::Type *> CoerceToSeq;
  flattenType(CGT.ConvertType(Ty), CoerceToSeq);
  auto *CoerceToType = llvm::StructType::get(CGT.getLLVMContext(), CoerceToSeq,
                                             /*isPacked=*/false);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
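// Example (illustrative, not from the original source): for a struct holding
// two svfloat32_t members, CoerceToSeq flattens to two <vscale x 4 x float>
// entries, the argument consumes two Z registers (NSRN += 2), and it is
// passed coerce-and-expand as { <vscale x 4 x float>, <vscale x 4 x float> }.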
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                                bool IsNamedArg,
                                                unsigned CallingConvention,
                                                unsigned &NSRN,
                                                unsigned &NPRN) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (IsVariadicFn && getTarget().getTriple().isWindowsArm64EC()) {
    // Arm64EC varargs functions use the x64 classification rules, not the
    // AArch64 ones.
    return WinX86_64CodegenInfo->getABIInfo().classifyArgForArm64ECVarArg(Ty);
  }

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty, NSRN, NPRN);

  if (!passAsAggregateType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const auto *ED = Ty->getAsEnumDecl())
      Ty = ED->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                       false);

    if (Ty->isVectorType())
      NSRN = std::min(NSRN + 1, 8u);
    else if (const auto *BT = Ty->getAs<BuiltinType>()) {
      if (BT->isFloatingPoint())
        NSRN = std::min(NSRN + 1, 8u);
      else {
        switch (BT->getKind()) {
        case BuiltinType::SveBool:
        case BuiltinType::SveCount:
          NPRN = std::min(NPRN + 1, 4u);
          break;
        case BuiltinType::SveBoolx2:
          NPRN = std::min(NPRN + 2, 4u);
          break;
        case BuiltinType::SveBoolx4:
          NPRN = std::min(NPRN + 4, 4u);
          break;
        case BuiltinType::MFloat8:
          NSRN = std::min(NSRN + 1, 8u);
          break;
        default:
          if (BT->isSVESizelessBuiltinType())
            NSRN = std::min(
                NSRN + getContext().getBuiltinVectorTypeInfo(BT).NumVectors,
                8u);
        }
      }
    }

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty, CGT.ConvertType(Ty))
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(Ty);

  // Structures with either a non-trivial destructor or a non-trivial copy
  // constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(
        Ty, getDataLayout().getAllocaAddrSpace(),
        /*ByVal=*/RAA == CGCXXABI::RAA_DirectInMemory);

  // Empty records:
  if (isEmptyRecord(getContext(), Ty, true)) {
    // In C mode, and on Darwin, empty records are ignored.
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();
    // ...
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadicFn;
  // In variadic functions on Windows, all composite types are treated alike,
  // with no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    NSRN = std::min(NSRN + Members, uint64_t(8));
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the argument alignment to 16, otherwise set it to 8
    // according to the AAPCS64.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // In AAPCS, named arguments of Pure Scalable Type are passed expanded in
  // registers, or indirectly if there are not enough registers left.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(Ty, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          Ty, IsNamedArg, NVec, NPred, UnpaddedCoerceToSeq, NSRN, NPRN);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // If the aggregate consists only of 64-bit pointers, coerce to an array
    // of pointers instead of plain integers; this avoids ptr->int->ptr
    // round-trips through stack memory when pointer authentication is enabled.
    auto ContainsOnlyPointers = [&](const auto &Self, QualType Ty) {
      if (isEmptyRecord(getContext(), Ty, true))
        return false;
      const RecordDecl *RD = Ty->getAsRecordDecl();
      if (!RD)
        return false;
      if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        for (const auto &I : CXXRD->bases())
          if (!Self(Self, I.getType()))
            return false;
      }
      return all_of(RD->fields(), [&](FieldDecl *FD) {
        QualType FDTy = FD->getType();
        if (FDTy->isArrayType())
          FDTy = getContext().getBaseElementType(FDTy);
        return (FDTy->isPointerOrReferenceType() &&
                getContext().getTypeSize(FDTy) == 64 &&
                !FDTy->getPointeeType().hasAddressSpace()) ||
               Self(Self, FDTy);
      });
    };

    // We use a pair of i64 for 16-byte aggregates with 8-byte alignment, and
    // i128 for aggregates with 16-byte alignment.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    if ((Size == 64 || Size == 128) && Alignment == 64 &&
        ContainsOnlyPointers(ContainsOnlyPointers, Ty))
      BaseTy = llvm::PointerType::getUnqual(getVMContext());
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                 /*ByVal=*/false);
}
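// Example (illustrative): struct { int a, b; } occupies 64 bits and gets
// coerced to a single i64, while a 16-byte struct { void *p, *q; } triggers
// the pointer-only rule and is passed as [2 x ptr].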
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadicFn) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      unsigned NSRN = 0, NPRN = 0;
      return coerceIllegalVector(RetTy, NSRN, NPRN);
    }
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy, getDataLayout().getAllocaAddrSpace());

  if (!passAsAggregateType(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const auto *ED = RetTy->getAsEnumDecl())
      RetTy = ED->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy,
                                       getDataLayout().getAllocaAddrSpace());

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  // ... (empty records are ignored)

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadicFn))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // In AAPCS, return values of Pure Scalable Type are treated as a single
  // named argument and passed expanded in registers, or indirectly if there
  // are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NSRN = 0, NPRN = 0;
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(RetTy, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          RetTy, /*IsNamedArg=*/true, NVec, NPred, UnpaddedCoerceToSeq, NSRN,
          NPRN);
  }

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in the lower bits of a 64-bit register
      // for LE, and in higher bits for BE. Coercing to an integer of the
      // exact size avoids clobbering the higher bits.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to a multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregates with 8-byte alignment, and
    // i128 for aggregates with 16-byte alignment.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy, getDataLayout().getAllocaAddrSpace());
}
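// Example (illustrative): on a little-endian target a 3-byte struct returns
// directly as an i24 in the low bits of x0 rather than via a hidden sret
// pointer.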
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // ... (fixed-length SVE vectors are handled elsewhere, not here)
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows
    // huge vectors.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  // Only 8- and 16-byte vectors are legal, mirroring the C vector rules.
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // For the soft-float ABI variant, no types are considered to be homogeneous
  // aggregates.
  if (isSoftFloat())
    return false;

  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (auto Kind = VT->getVectorKind();
        Kind == VectorKind::SveFixedLengthData ||
        Kind == VectorKind::SveFixedLengthPredicate)
      return false;

    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}
bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  // AAPCS64 applies the homogeneity rules to the output of the data layout
  // decision, so anything that does not affect data layout, such as a
  // zero-length bitfield, does not stop a struct from being homogeneous.
  return true;
}
bool AArch64ABIInfo::passAsAggregateType(QualType Ty) const {
  if (Kind == AArch64ABIKind::AAPCS && Ty->isSVESizelessBuiltinType()) {
    const auto *BT = Ty->castAs<BuiltinType>();
    return !BT->isSVECount() &&
           getContext().getBuiltinVectorTypeInfo(BT).NumVectors > 1;
  }
  return isAggregateTypeForABI(Ty);
}
bool AArch64ABIInfo::passAsPureScalableType(
    QualType Ty, unsigned &NVec, unsigned &NPred,
    SmallVectorImpl<llvm::Type *> &CoerceToSeq) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElt = AT->getZExtSize();
    if (NElt == 0)
      return false;

    unsigned NV = 0, NP = 0;
    SmallVector<llvm::Type *> EltCoerceToSeq;
    if (!passAsPureScalableType(AT->getElementType(), NV, NP, EltCoerceToSeq))
      return false;

    if (CoerceToSeq.size() + NElt * EltCoerceToSeq.size() > 12)
      return false;

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::append_range(CoerceToSeq, EltCoerceToSeq);

    NVec += NElt * NV;
    NPred += NElt * NP;
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // ... (records that cannot be passed in registers are not PSTs)
    const RecordDecl *RD = RT->getDecl()->getDefinitionOrSelf();

    // Check the base classes.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;
        if (!passAsPureScalableType(I.getType(), NVec, NPred, CoerceToSeq))
          return false;
      }
    }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      QualType FT = FD->getType();
      if (isEmptyField(getContext(), FD, /*AllowArrays=*/true))
        continue;
      if (!passAsPureScalableType(FT, NVec, NPred, CoerceToSeq))
        return false;
    }

    return true;
  }

  if (const auto *VT = Ty->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      ++NPred;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
      ++NVec;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    return false;
  }

  if (!Ty->isBuiltinType())
    return false;

  bool isPredicate;
  switch (Ty->castAs<BuiltinType>()->getKind()) {
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    isPredicate = false;                                                       \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:                                                        \
    isPredicate = true;                                                        \
    break;
#include "clang/Basic/AArch64ACLETypes.def"
  default:
    return false;
  }

  ASTContext::BuiltinVectorTypeInfo Info =
      getContext().getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
  assert(Info.NumVectors > 0 && Info.NumVectors <= 4 &&
         "Expected 1, 2, 3 or 4 vectors!");
  if (isPredicate)
    NPred += Info.NumVectors;
  else
    NVec += Info.NumVectors;
  llvm::Type *EltTy = Info.ElementType->isMFloat8Type()
                          ? llvm::Type::getInt8Ty(getVMContext())
                          : CGT.ConvertType(Info.ElementType);
  auto *VTy = llvm::ScalableVectorType::get(EltTy, Info.EC.getKnownMinValue());

  if (CoerceToSeq.size() + Info.NumVectors > 12)
    return false;
  std::fill_n(std::back_inserter(CoerceToSeq), Info.NumVectors, VTy);

  return true;
}
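// Example (illustrative): struct PST { svbool_t P; svfloat32_t V[2]; } is a
// pure scalable type with NPred = 1 and NVec = 2, and CoerceToSeq becomes
// { <vscale x 16 x i1>, <vscale x 4 x float>, <vscale x 4 x float> }.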
// Expand an LLVM IR type into a sequence with an element for each non-struct,
// non-array member of the type, retaining padding types.
void AArch64ABIInfo::flattenType(
    llvm::Type *Ty, SmallVectorImpl<llvm::Type *> &Flattened) const {
  if (ABIArgInfo::isPaddingForCoerceAndExpand(Ty)) {
    Flattened.push_back(Ty);
    return;
  }

  if (const auto *AT = dyn_cast<llvm::ArrayType>(Ty)) {
    uint64_t NElt = AT->getNumElements();
    if (NElt == 0)
      return;

    SmallVector<llvm::Type *> EltFlattened;
    flattenType(AT->getElementType(), EltFlattened);

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::append_range(Flattened, EltFlattened);
    return;
  }

  if (const auto *ST = dyn_cast<llvm::StructType>(Ty)) {
    for (auto *ET : ST->elements())
      flattenType(ET, Flattened);
    return;
  }

  Flattened.push_back(Ty);
}
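// The AAPCS64 va_list is a five-field struct:
//   struct va_list { void *__stack; void *__gr_top; void *__vr_top;
//                    int __gr_offs; int __vr_offs; };
// __gr_offs and __vr_offs are negative offsets from __gr_top/__vr_top into
// the general-register and FP/SIMD-register save areas; once an offset turns
// non-negative, the remaining arguments of that class live on the stack.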
RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF, AArch64ABIKind Kind,
                                      AggValueSlot Slot) const {
  // These counters are not used for variadic arguments, so it doesn't matter
  // that they don't retain their values across calls.
  unsigned NSRN = 0, NPRN = 0;
  ABIArgInfo AI =
      classifyArgumentType(Ty, /*IsVariadicFn=*/true, /*IsNamedArg=*/false,
                           CGF.CurFnInfo->getCallingConvention(), NSRN, NPRN);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy->getContext());
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR =
      !isSoftFloat() && (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());

  // The AArch64 va_list type and handling is specified in AAPCS64 section
  // B.4; see the layout comment above.
  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }

  // If reg_offs >= 0 we're already using the stack for this type of argument.
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers; the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; }" gets passed in an even pair of x-registers), in
  // which case __gr_offs must first be aligned.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::getSigned(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the offset for the next call to va_arg on this list; allocating an
  // argument to the stack also uses up all remaining registers of its class.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we can decide whether this argument really was in registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  // Argument was in registers.
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
                   CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find
    // from stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy->getContext());
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers have their elements stored
    // 16 bytes apart regardless of size (they're notionally in qN, qN+1,
    // ...), so reload and store them into a contiguous temporary.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value is right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = LoadAddr.withElementType(BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = Tmp.withElementType(MemTy);
  } else {
    // Otherwise the object is contiguous in memory, though it might be
    // right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) && TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = BaseAddr.withElementType(MemTy);
  }

  CGF.EmitBranch(ContBlock);

  // Argument was on the stack.
  CGF.EmitBlock(OnStackBlock);
  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Stack arguments may need realignment; here both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8)
    OnStackPtr = emitRoundPointerUpToAlignment(CGF, OnStackPtr, TyAlign);

  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = OnStackAddr.withElementType(MemTy);

  CGF.EmitBranch(ContBlock);

  // Tidy up.
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return CGF.EmitLoadOfAnyValue(
        CGF.MakeAddrLValue(
            Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
                    TyAlign),
            Ty),
        Slot);

  return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(ResAddr, Ty), Slot);
}
RValue AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF,
                                       AggValueSlot Slot) const {
  // The backend's lowering doesn't support va_arg for aggregates or illegal
  // vector types, so lower those here and use the LLVM va_arg instruction for
  // everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return CGF.EmitLoadOfAnyValue(
        CGF.MakeAddrLValue(
            EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()), Ty),
        Slot);

  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Slot.asRValue();

  // The size of the actual thing passed, which might end up just being a
  // pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous aggregates are
  // passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign=*/true, Slot);
}
RValue AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  bool IsIndirect = false;

  // Composites larger than 16 bytes are passed by reference.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
    IsIndirect = true;
  if (getTarget().getTriple().isWindowsArm64EC()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or
    // is not 1, 2, 4, or 8 bytes, must be passed by reference."
    uint64_t Width = getContext().getTypeSize(Ty);
    IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*AllowHigherAlign=*/false, Slot);
}
static bool isStreamingCompatible(const FunctionDecl *F) {
  if (const auto *T = F->getType()->getAs<FunctionProtoType>())
    return T->getAArch64SMEAttributes() &
           FunctionType::SME_PStateSMCompatibleMask;
  return false;
}

// Report an error if an argument or return value of type Ty would need to be
// passed in a floating-point register.
static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags,
                                 const StringRef ABIName,
                                 const AArch64ABIInfo &ABIInfo,
                                 const QualType &Ty, const NamedDecl *D,
                                 SourceLocation loc) {
  const Type *HABase = nullptr;
  uint64_t HAMembers = 0;
  if (Ty->isFloatingType() || Ty->isVectorType() ||
      ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
    Diags.Report(loc, diag::err_target_unsupported_type_for_abi)
        << D->getDeclName() << Ty << ABIName;
  }
}
// If we are using a hard-float ABI, but do not have floating-point registers,
// report an error for any function arguments or returns which would be passed
// in floating-point registers.
void AArch64TargetCodeGenInfo::checkFunctionABI(
    CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!TI.hasFeature("fp") && !ABIInfo.isSoftFloat()) {
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo,
                         FuncDecl->getReturnType(), FuncDecl,
                         FuncDecl->getLocation());
    for (ParmVarDecl *PVD : FuncDecl->parameters())
      diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo,
                           PVD->getType(), PVD, FuncDecl->getLocation());
  }
}
// Determines if there are any Arm SME ABI issues with inlining Callee into
// Caller.
enum class ArmSMEInlinability : uint8_t {
  Ok = 0,
  ErrorIncompatibleStreamingModes = 1 << 0,
  WarnIncompatibleStreamingModes = 1 << 1,
  ErrorCalleeRequiresNewZA = 1 << 2,
  ErrorCalleeRequiresNewZT0 = 1 << 3,
  IncompatibleStreamingModes =
      ErrorIncompatibleStreamingModes | WarnIncompatibleStreamingModes,
  LLVM_MARK_AS_BITMASK_ENUM(ErrorCalleeRequiresNewZT0),
};

static ArmSMEInlinability GetArmSMEInlinability(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee) {
  bool CallerIsStreaming =
      IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
  bool CalleeIsStreaming =
      IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
  bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
  bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);

  ArmSMEInlinability Inlinability = ArmSMEInlinability::Ok;

  if (!CalleeIsStreamingCompatible &&
      (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible)) {
    if (CalleeIsStreaming)
      Inlinability |= ArmSMEInlinability::ErrorIncompatibleStreamingModes;
    else
      Inlinability |= ArmSMEInlinability::WarnIncompatibleStreamingModes;
  }
  if (auto *NewAttr = Callee->getAttr<ArmNewAttr>()) {
    if (NewAttr->isNewZA())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZA;
    if (NewAttr->isNewZT0())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZT0;
  }

  return Inlinability;
}
void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee) const {
  if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
    return;

  ArmSMEInlinability Inlinability = GetArmSMEInlinability(Caller, Callee);

  if ((Inlinability & ArmSMEInlinability::IncompatibleStreamingModes) !=
      ArmSMEInlinability::Ok)
    CGM.getDiags().Report(
        CallLoc,
        (Inlinability & ArmSMEInlinability::ErrorIncompatibleStreamingModes) ==
                ArmSMEInlinability::ErrorIncompatibleStreamingModes
            ? diag::err_function_always_inline_attribute_mismatch
            : diag::warn_function_always_inline_attribute_mismatch)
        << Caller->getDeclName() << Callee->getDeclName() << "streaming";

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZA) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZA)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
        << Callee->getDeclName();

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZT0) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZT0)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_zt0)
        << Callee->getDeclName();
}
// If the target does not have floating-point registers, but we are using a
// hard-float ABI, there is no way to pass floating-point, vector or HFA
// values to functions, so we report an error.
void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!Caller || TI.hasFeature("fp") || ABIInfo.isSoftFloat())
    return;

  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
                       Callee ? Callee : Caller, CallLoc);

  for (const CallArg &Arg : Args)
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
                         Callee ? Callee : Caller, CallLoc);
}
void AArch64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
                                                    SourceLocation CallLoc,
                                                    const FunctionDecl *Caller,
                                                    const FunctionDecl *Callee,
                                                    const CallArgList &Args,
                                                    QualType ReturnType) const {
  checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
  checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args, ReturnType);
}
bool AArch64TargetCodeGenInfo::wouldInliningViolateFunctionCallABI(
    const FunctionDecl *Caller, const FunctionDecl *Callee) const {
  return Caller && Callee &&
         GetArmSMEInlinability(Caller, Callee) != ArmSMEInlinability::Ok;
}
void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                             unsigned Index,
                                             raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}
void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
                                             raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << "._";
  SmallVector<StringRef, 8> Features;
  AttrStr.split(Features, "+");
  for (auto &Feat : Features)
    Feat = Feat.trim();

  llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
    return LHS.compare(RHS) < 0;
  });

  llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
  for (auto &Feat : Features)
    if (getTarget().doesFeatureAffectCodeGen(Feat))
      if (auto Ext = llvm::AArch64::parseFMVExtension(Feat))
        if (UniqueFeats.insert(Ext->Name).second)
          Out << 'M' << Ext->Name;
}
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM, Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM, K);
}