clang 19.0.0git
AArch64.cpp
Go to the documentation of this file.
1//===- AArch64.cpp --------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
13
14using namespace clang;
15using namespace clang::CodeGen;
16
17//===----------------------------------------------------------------------===//
18// AArch64 ABI Implementation
19//===----------------------------------------------------------------------===//
20
21namespace {
22
23class AArch64ABIInfo : public ABIInfo {
25
26public:
27 AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
28 : ABIInfo(CGT), Kind(Kind) {}
29
30 bool isSoftFloat() const { return Kind == AArch64ABIKind::AAPCSSoft; }
31
32private:
33 AArch64ABIKind getABIKind() const { return Kind; }
34 bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }
35
36 ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
37 ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
38 unsigned CallingConvention) const;
39 ABIArgInfo coerceIllegalVector(QualType Ty) const;
40 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
42 uint64_t Members) const override;
44
45 bool isIllegalVectorType(QualType Ty) const;
46
47 void computeInfo(CGFunctionInfo &FI) const override {
48 if (!::classifyReturnType(getCXXABI(), FI, *this))
49 FI.getReturnInfo() =
51
52 for (auto &it : FI.arguments())
53 it.info = classifyArgumentType(it.type, FI.isVariadic(),
55 }
56
57 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
58 CodeGenFunction &CGF) const;
59
60 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
61 AArch64ABIKind Kind) const;
62
64 QualType Ty) const override {
65 llvm::Type *BaseTy = CGF.ConvertType(Ty);
66 if (isa<llvm::ScalableVectorType>(BaseTy))
67 llvm::report_fatal_error("Passing SVE types to variadic functions is "
68 "currently not supported");
69
70 return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
71 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
72 : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind);
73 }
74
76 QualType Ty) const override;
77
78 bool allowBFloatArgsAndRet() const override {
79 return getTarget().hasBFloat16Type();
80 }
81
83 void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
84 raw_ostream &Out) const override;
85 void appendAttributeMangling(StringRef AttrStr,
86 raw_ostream &Out) const override;
87};
88
/// Swift ABI hooks for AArch64; the Swift error value lives in a register,
/// and only vector-type legality differs from the common Swift lowering.
class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  // Returns true if a vector of NumElts elements of EltTy occupying
  // VectorSize can be passed legally for Swift on this target.
  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};
97
98class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
99public:
100 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
101 : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
102 SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
103 }
104
105 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
106 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
107 }
108
109 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
110 return 31;
111 }
112
113 bool doesReturnSlotInterfereWithArgs() const override { return false; }
114
115 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
116 CodeGen::CodeGenModule &CGM) const override {
117 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
118 if (!FD)
119 return;
120
121 const auto *TA = FD->getAttr<TargetAttr>();
122 if (TA == nullptr)
123 return;
124
126 CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
127 if (Attr.BranchProtection.empty())
128 return;
129
131 StringRef Error;
132 (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
133 Attr.CPU, BPI, Error);
134 assert(Error.empty());
135
136 auto *Fn = cast<llvm::Function>(GV);
137 Fn->addFnAttr("sign-return-address", BPI.getSignReturnAddrStr());
138
139 if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
140 Fn->addFnAttr("sign-return-address-key",
141 BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
142 ? "a_key"
143 : "b_key");
144 }
145
146 Fn->addFnAttr("branch-target-enforcement",
147 BPI.BranchTargetEnforcement ? "true" : "false");
148 Fn->addFnAttr("branch-protection-pauth-lr",
149 BPI.BranchProtectionPAuthLR ? "true" : "false");
150 Fn->addFnAttr("guarded-control-stack",
151 BPI.GuardedControlStack ? "true" : "false");
152 }
153
155 llvm::Type *Ty) const override {
156 if (CGF.getTarget().hasFeature("ls64")) {
157 auto *ST = dyn_cast<llvm::StructType>(Ty);
158 if (ST && ST->getNumElements() == 1) {
159 auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
160 if (AT && AT->getNumElements() == 8 &&
161 AT->getElementType()->isIntegerTy(64))
162 return true;
163 }
164 }
166 }
167
169 const FunctionDecl *Decl) const override;
170
172 const FunctionDecl *Caller,
173 const FunctionDecl *Callee,
174 const CallArgList &Args) const override;
175};
176
/// Windows-on-ARM64 target hooks: adds stack-probe attributes and the
/// MSVC-style linker directives for dependent libraries and mismatch checks.
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  // Emit a "/DEFAULTLIB:" directive so the linker pulls in Lib.
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  // Emit a "/FAILIFMISMATCH:" directive for detect-mismatch checks.
  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
195
196void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
197 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
198 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
199 if (GV->isDeclaration())
200 return;
201 addStackProbeTargetAttributes(D, GV, CGM);
202}
203}
204
205ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
206 assert(Ty->isVectorType() && "expected vector type!");
207
208 const auto *VT = Ty->castAs<VectorType>();
209 if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
210 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
211 assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
212 BuiltinType::UChar &&
213 "unexpected builtin type for SVE predicate!");
214 return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
215 llvm::Type::getInt1Ty(getVMContext()), 16));
216 }
217
218 if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
219 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
220
221 const auto *BT = VT->getElementType()->castAs<BuiltinType>();
222 llvm::ScalableVectorType *ResType = nullptr;
223 switch (BT->getKind()) {
224 default:
225 llvm_unreachable("unexpected builtin type for SVE vector!");
226 case BuiltinType::SChar:
227 case BuiltinType::UChar:
228 ResType = llvm::ScalableVectorType::get(
229 llvm::Type::getInt8Ty(getVMContext()), 16);
230 break;
231 case BuiltinType::Short:
232 case BuiltinType::UShort:
233 ResType = llvm::ScalableVectorType::get(
234 llvm::Type::getInt16Ty(getVMContext()), 8);
235 break;
236 case BuiltinType::Int:
237 case BuiltinType::UInt:
238 ResType = llvm::ScalableVectorType::get(
239 llvm::Type::getInt32Ty(getVMContext()), 4);
240 break;
241 case BuiltinType::Long:
242 case BuiltinType::ULong:
243 ResType = llvm::ScalableVectorType::get(
244 llvm::Type::getInt64Ty(getVMContext()), 2);
245 break;
246 case BuiltinType::Half:
247 ResType = llvm::ScalableVectorType::get(
248 llvm::Type::getHalfTy(getVMContext()), 8);
249 break;
250 case BuiltinType::Float:
251 ResType = llvm::ScalableVectorType::get(
252 llvm::Type::getFloatTy(getVMContext()), 4);
253 break;
254 case BuiltinType::Double:
255 ResType = llvm::ScalableVectorType::get(
256 llvm::Type::getDoubleTy(getVMContext()), 2);
257 break;
258 case BuiltinType::BFloat16:
259 ResType = llvm::ScalableVectorType::get(
260 llvm::Type::getBFloatTy(getVMContext()), 8);
261 break;
262 }
263 return ABIArgInfo::getDirect(ResType);
264 }
265
266 uint64_t Size = getContext().getTypeSize(Ty);
267 // Android promotes <2 x i8> to i16, not i32
268 if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
269 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
270 return ABIArgInfo::getDirect(ResType);
271 }
272 if (Size <= 32) {
273 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
274 return ABIArgInfo::getDirect(ResType);
275 }
276 if (Size == 64) {
277 auto *ResType =
278 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
279 return ABIArgInfo::getDirect(ResType);
280 }
281 if (Size == 128) {
282 auto *ResType =
283 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
284 return ABIArgInfo::getDirect(ResType);
285 }
286 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
287}
288
290AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
291 unsigned CallingConvention) const {
293
294 // Handle illegal vector types here.
295 if (isIllegalVectorType(Ty))
296 return coerceIllegalVector(Ty);
297
298 if (!isAggregateTypeForABI(Ty)) {
299 // Treat an enum type as its underlying type.
300 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
301 Ty = EnumTy->getDecl()->getIntegerType();
302
303 if (const auto *EIT = Ty->getAs<BitIntType>())
304 if (EIT->getNumBits() > 128)
305 return getNaturalAlignIndirect(Ty);
306
307 return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
310 }
311
312 // Structures with either a non-trivial destructor or a non-trivial
313 // copy constructor are always indirect.
314 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
315 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
317 }
318
319 // Empty records are always ignored on Darwin, but actually passed in C++ mode
320 // elsewhere for GNU compatibility.
321 uint64_t Size = getContext().getTypeSize(Ty);
322 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
323 if (IsEmpty || Size == 0) {
324 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
325 return ABIArgInfo::getIgnore();
326
327 // GNU C mode. The only argument that gets ignored is an empty one with size
328 // 0.
329 if (IsEmpty && Size == 0)
330 return ABIArgInfo::getIgnore();
331 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
332 }
333
334 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
335 const Type *Base = nullptr;
336 uint64_t Members = 0;
337 bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
338 CallingConvention == llvm::CallingConv::Win64;
339 bool IsWinVariadic = IsWin64 && IsVariadic;
340 // In variadic functions on Windows, all composite types are treated alike,
341 // no special handling of HFAs/HVAs.
342 if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
343 if (Kind != AArch64ABIKind::AAPCS)
345 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
346
347 // For HFAs/HVAs, cap the argument alignment to 16, otherwise
348 // set it to 8 according to the AAPCS64 document.
349 unsigned Align =
350 getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
351 Align = (Align >= 16) ? 16 : 8;
353 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
354 nullptr, true, Align);
355 }
356
357 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
358 if (Size <= 128) {
359 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
360 // same size and alignment.
361 if (getTarget().isRenderScriptTarget()) {
362 return coerceToIntArray(Ty, getContext(), getVMContext());
363 }
364 unsigned Alignment;
365 if (Kind == AArch64ABIKind::AAPCS) {
366 Alignment = getContext().getTypeUnadjustedAlign(Ty);
367 Alignment = Alignment < 128 ? 64 : 128;
368 } else {
369 Alignment =
370 std::max(getContext().getTypeAlign(Ty),
371 (unsigned)getTarget().getPointerWidth(LangAS::Default));
372 }
373 Size = llvm::alignTo(Size, Alignment);
374
375 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
376 // For aggregates with 16-byte alignment, we use i128.
377 llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
379 Size == Alignment ? BaseTy
380 : llvm::ArrayType::get(BaseTy, Size / Alignment));
381 }
382
383 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
384}
385
386ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
387 bool IsVariadic) const {
388 if (RetTy->isVoidType())
389 return ABIArgInfo::getIgnore();
390
391 if (const auto *VT = RetTy->getAs<VectorType>()) {
392 if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
393 VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
394 return coerceIllegalVector(RetTy);
395 }
396
397 // Large vector types should be returned via memory.
398 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
399 return getNaturalAlignIndirect(RetTy);
400
401 if (!isAggregateTypeForABI(RetTy)) {
402 // Treat an enum type as its underlying type.
403 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
404 RetTy = EnumTy->getDecl()->getIntegerType();
405
406 if (const auto *EIT = RetTy->getAs<BitIntType>())
407 if (EIT->getNumBits() > 128)
408 return getNaturalAlignIndirect(RetTy);
409
410 return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
411 ? ABIArgInfo::getExtend(RetTy)
413 }
414
415 uint64_t Size = getContext().getTypeSize(RetTy);
416 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
417 return ABIArgInfo::getIgnore();
418
419 const Type *Base = nullptr;
420 uint64_t Members = 0;
421 if (isHomogeneousAggregate(RetTy, Base, Members) &&
422 !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
423 IsVariadic))
424 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
425 return ABIArgInfo::getDirect();
426
427 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
428 if (Size <= 128) {
429 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
430 // same size and alignment.
431 if (getTarget().isRenderScriptTarget()) {
432 return coerceToIntArray(RetTy, getContext(), getVMContext());
433 }
434
435 if (Size <= 64 && getDataLayout().isLittleEndian()) {
436 // Composite types are returned in lower bits of a 64-bit register for LE,
437 // and in higher bits for BE. However, integer types are always returned
438 // in lower bits for both LE and BE, and they are not rounded up to
439 // 64-bits. We can skip rounding up of composite types for LE, but not for
440 // BE, otherwise composite types will be indistinguishable from integer
441 // types.
443 llvm::IntegerType::get(getVMContext(), Size));
444 }
445
446 unsigned Alignment = getContext().getTypeAlign(RetTy);
447 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
448
449 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
450 // For aggregates with 16-byte alignment, we use i128.
451 if (Alignment < 128 && Size == 128) {
452 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
453 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
454 }
455 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
456 }
457
458 return getNaturalAlignIndirect(RetTy);
459}
460
461/// isIllegalVectorType - check whether the vector type is legal for AArch64.
462bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
463 if (const VectorType *VT = Ty->getAs<VectorType>()) {
464 // Check whether VT is a fixed-length SVE vector. These types are
465 // represented as scalable vectors in function args/return and must be
466 // coerced from fixed vectors.
467 if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
468 VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
469 return true;
470
471 // Check whether VT is legal.
472 unsigned NumElements = VT->getNumElements();
473 uint64_t Size = getContext().getTypeSize(VT);
474 // NumElements should be power of 2.
475 if (!llvm::isPowerOf2_32(NumElements))
476 return true;
477
478 // arm64_32 has to be compatible with the ARM logic here, which allows huge
479 // vectors for some reason.
480 llvm::Triple Triple = getTarget().getTriple();
481 if (Triple.getArch() == llvm::Triple::aarch64_32 &&
482 Triple.isOSBinFormatMachO())
483 return Size <= 32;
484
485 return Size != 64 && (Size != 128 || NumElements == 1);
486 }
487 return false;
488}
489
490bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
491 llvm::Type *EltTy,
492 unsigned NumElts) const {
493 if (!llvm::isPowerOf2_32(NumElts))
494 return false;
495 if (VectorSize.getQuantity() != 8 &&
496 (VectorSize.getQuantity() != 16 || NumElts == 1))
497 return false;
498 return true;
499}
500
501bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
502 // For the soft-float ABI variant, no types are considered to be homogeneous
503 // aggregates.
504 if (Kind == AArch64ABIKind::AAPCSSoft)
505 return false;
506
507 // Homogeneous aggregates for AAPCS64 must have base types of a floating
508 // point type or a short-vector type. This is the same as the 32-bit ABI,
509 // but with the difference that any floating-point type is allowed,
510 // including __fp16.
511 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
512 if (BT->isFloatingPoint())
513 return true;
514 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
515 unsigned VecSize = getContext().getTypeSize(VT);
516 if (VecSize == 64 || VecSize == 128)
517 return true;
518 }
519 return false;
520}
521
522bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
523 uint64_t Members) const {
524 return Members <= 4;
525}
526
527bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
528 const {
529 // AAPCS64 says that the rule for whether something is a homogeneous
530 // aggregate is applied to the output of the data layout decision. So
531 // anything that doesn't affect the data layout also does not affect
532 // homogeneity. In particular, zero-length bitfields don't stop a struct
533 // being homogeneous.
534 return true;
535}
536
537Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
538 CodeGenFunction &CGF,
539 AArch64ABIKind Kind) const {
540 ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
542 // Empty records are ignored for parameter passing purposes.
543 if (AI.isIgnore()) {
544 uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
545 CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
546 VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
547 auto *Load = CGF.Builder.CreateLoad(VAListAddr);
548 return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
549 }
550
551 bool IsIndirect = AI.isIndirect();
552
553 llvm::Type *BaseTy = CGF.ConvertType(Ty);
554 if (IsIndirect)
555 BaseTy = llvm::PointerType::getUnqual(BaseTy);
556 else if (AI.getCoerceToType())
557 BaseTy = AI.getCoerceToType();
558
559 unsigned NumRegs = 1;
560 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
561 BaseTy = ArrTy->getElementType();
562 NumRegs = ArrTy->getNumElements();
563 }
564 bool IsFPR = Kind != AArch64ABIKind::AAPCSSoft &&
565 (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());
566
567 // The AArch64 va_list type and handling is specified in the Procedure Call
568 // Standard, section B.4:
569 //
570 // struct {
571 // void *__stack;
572 // void *__gr_top;
573 // void *__vr_top;
574 // int __gr_offs;
575 // int __vr_offs;
576 // };
577
578 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
579 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
580 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
581 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
582
583 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
584 CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
585
586 Address reg_offs_p = Address::invalid();
587 llvm::Value *reg_offs = nullptr;
588 int reg_top_index;
589 int RegSize = IsIndirect ? 8 : TySize.getQuantity();
590 if (!IsFPR) {
591 // 3 is the field number of __gr_offs
592 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
593 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
594 reg_top_index = 1; // field number for __gr_top
595 RegSize = llvm::alignTo(RegSize, 8);
596 } else {
597 // 4 is the field number of __vr_offs.
598 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
599 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
600 reg_top_index = 2; // field number for __vr_top
601 RegSize = 16 * NumRegs;
602 }
603
604 //=======================================
605 // Find out where argument was passed
606 //=======================================
607
608 // If reg_offs >= 0 we're already using the stack for this type of
609 // argument. We don't want to keep updating reg_offs (in case it overflows,
610 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
611 // whatever they get).
612 llvm::Value *UsingStack = nullptr;
613 UsingStack = CGF.Builder.CreateICmpSGE(
614 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
615
616 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
617
618 // Otherwise, at least some kind of argument could go in these registers, the
619 // question is whether this particular type is too big.
620 CGF.EmitBlock(MaybeRegBlock);
621
622 // Integer arguments may need to correct register alignment (for example a
623 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
624 // align __gr_offs to calculate the potential address.
625 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
626 int Align = TyAlign.getQuantity();
627
628 reg_offs = CGF.Builder.CreateAdd(
629 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
630 "align_regoffs");
631 reg_offs = CGF.Builder.CreateAnd(
632 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
633 "aligned_regoffs");
634 }
635
636 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
637 // The fact that this is done unconditionally reflects the fact that
638 // allocating an argument to the stack also uses up all the remaining
639 // registers of the appropriate kind.
640 llvm::Value *NewOffset = nullptr;
641 NewOffset = CGF.Builder.CreateAdd(
642 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
643 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
644
645 // Now we're in a position to decide whether this argument really was in
646 // registers or not.
647 llvm::Value *InRegs = nullptr;
648 InRegs = CGF.Builder.CreateICmpSLE(
649 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
650
651 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
652
653 //=======================================
654 // Argument was in registers
655 //=======================================
656
657 // Now we emit the code for if the argument was originally passed in
658 // registers. First start the appropriate block:
659 CGF.EmitBlock(InRegBlock);
660
661 llvm::Value *reg_top = nullptr;
662 Address reg_top_p =
663 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
664 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
665 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
666 CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
667 Address RegAddr = Address::invalid();
668 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;
669
670 if (IsIndirect) {
671 // If it's been passed indirectly (actually a struct), whatever we find from
672 // stored registers or on the stack will actually be a struct **.
673 MemTy = llvm::PointerType::getUnqual(MemTy);
674 }
675
676 const Type *Base = nullptr;
677 uint64_t NumMembers = 0;
678 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
679 if (IsHFA && NumMembers > 1) {
680 // Homogeneous aggregates passed in registers will have their elements split
681 // and stored 16-bytes apart regardless of size (they're notionally in qN,
682 // qN+1, ...). We reload and store into a temporary local variable
683 // contiguously.
684 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
685 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
686 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
687 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
688 Address Tmp = CGF.CreateTempAlloca(HFATy,
689 std::max(TyAlign, BaseTyInfo.Align));
690
691 // On big-endian platforms, the value will be right-aligned in its slot.
692 int Offset = 0;
693 if (CGF.CGM.getDataLayout().isBigEndian() &&
694 BaseTyInfo.Width.getQuantity() < 16)
695 Offset = 16 - BaseTyInfo.Width.getQuantity();
696
697 for (unsigned i = 0; i < NumMembers; ++i) {
698 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
699 Address LoadAddr =
700 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
701 LoadAddr = LoadAddr.withElementType(BaseTy);
702
703 Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
704
705 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
706 CGF.Builder.CreateStore(Elem, StoreAddr);
707 }
708
709 RegAddr = Tmp.withElementType(MemTy);
710 } else {
711 // Otherwise the object is contiguous in memory.
712
713 // It might be right-aligned in its slot.
714 CharUnits SlotSize = BaseAddr.getAlignment();
715 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
716 (IsHFA || !isAggregateTypeForABI(Ty)) &&
717 TySize < SlotSize) {
718 CharUnits Offset = SlotSize - TySize;
719 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
720 }
721
722 RegAddr = BaseAddr.withElementType(MemTy);
723 }
724
725 CGF.EmitBranch(ContBlock);
726
727 //=======================================
728 // Argument was on the stack
729 //=======================================
730 CGF.EmitBlock(OnStackBlock);
731
732 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
733 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
734
735 // Again, stack arguments may need realignment. In this case both integer and
736 // floating-point ones might be affected.
737 if (!IsIndirect && TyAlign.getQuantity() > 8) {
738 int Align = TyAlign.getQuantity();
739
740 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
741
742 OnStackPtr = CGF.Builder.CreateAdd(
743 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
744 "align_stack");
745 OnStackPtr = CGF.Builder.CreateAnd(
746 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
747 "align_stack");
748
749 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
750 }
751 Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
752 std::max(CharUnits::fromQuantity(8), TyAlign));
753
754 // All stack slots are multiples of 8 bytes.
755 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
756 CharUnits StackSize;
757 if (IsIndirect)
758 StackSize = StackSlotSize;
759 else
760 StackSize = TySize.alignTo(StackSlotSize);
761
762 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
763 llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
764 CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
765
766 // Write the new value of __stack for the next call to va_arg
767 CGF.Builder.CreateStore(NewStack, stack_p);
768
769 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
770 TySize < StackSlotSize) {
771 CharUnits Offset = StackSlotSize - TySize;
772 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
773 }
774
775 OnStackAddr = OnStackAddr.withElementType(MemTy);
776
777 CGF.EmitBranch(ContBlock);
778
779 //=======================================
780 // Tidy up
781 //=======================================
782 CGF.EmitBlock(ContBlock);
783
784 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
785 OnStackBlock, "vaargs.addr");
786
787 if (IsIndirect)
788 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
789 TyAlign);
790
791 return ResAddr;
792}
793
794Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
795 CodeGenFunction &CGF) const {
796 // The backend's lowering doesn't support va_arg for aggregates or
797 // illegal vector types. Lower VAArg here for these cases and use
798 // the LLVM va_arg instruction for everything else.
799 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
800 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
801
802 uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
803 CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
804
805 // Empty records are ignored for parameter passing purposes.
806 if (isEmptyRecord(getContext(), Ty, true))
807 return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
808 CGF.ConvertTypeForMem(Ty), SlotSize);
809
810 // The size of the actual thing passed, which might end up just
811 // being a pointer for indirect types.
812 auto TyInfo = getContext().getTypeInfoInChars(Ty);
813
814 // Arguments bigger than 16 bytes which aren't homogeneous
815 // aggregates should be passed indirectly.
816 bool IsIndirect = false;
817 if (TyInfo.Width.getQuantity() > 16) {
818 const Type *Base = nullptr;
819 uint64_t Members = 0;
820 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
821 }
822
823 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
824 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
825}
826
827Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
828 QualType Ty) const {
829 bool IsIndirect = false;
830
831 // Composites larger than 16 bytes are passed by reference.
832 if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
833 IsIndirect = true;
834
835 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
838 /*allowHigherAlign*/ false);
839}
840
841static bool isStreaming(const FunctionDecl *F) {
842 if (F->hasAttr<ArmLocallyStreamingAttr>())
843 return true;
844 if (const auto *T = F->getType()->getAs<FunctionProtoType>())
846 return false;
847}
848
849static bool isStreamingCompatible(const FunctionDecl *F) {
850 if (const auto *T = F->getType()->getAs<FunctionProtoType>())
851 return T->getAArch64SMEAttributes() &
853 return false;
854}
855
856void AArch64TargetCodeGenInfo::checkFunctionABI(
857 CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
858 const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
860
861 // If we are using a hard-float ABI, but do not have floating point
862 // registers, then report an error for any function arguments or returns
863 // which would be passed in floating-pint registers.
864 auto CheckType = [&CGM, &TI, &ABIInfo](const QualType &Ty,
865 const NamedDecl *D) {
866 const Type *HABase = nullptr;
867 uint64_t HAMembers = 0;
868 if (Ty->isFloatingType() || Ty->isVectorType() ||
869 ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
870 CGM.getDiags().Report(D->getLocation(),
871 diag::err_target_unsupported_type_for_abi)
872 << D->getDeclName() << Ty << TI.getABI();
873 }
874 };
875
876 if (!TI.hasFeature("fp") && !ABIInfo.isSoftFloat()) {
877 CheckType(FuncDecl->getReturnType(), FuncDecl);
878 for (ParmVarDecl *PVD : FuncDecl->parameters()) {
879 CheckType(PVD->getType(), PVD);
880 }
881 }
882}
883
884void AArch64TargetCodeGenInfo::checkFunctionCallABI(
885 CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
886 const FunctionDecl *Callee, const CallArgList &Args) const {
887 if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
888 return;
889
890 bool CallerIsStreaming = isStreaming(Caller);
891 bool CalleeIsStreaming = isStreaming(Callee);
892 bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
893 bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);
894
895 if (!CalleeIsStreamingCompatible &&
896 (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible))
897 CGM.getDiags().Report(CallLoc,
898 diag::err_function_always_inline_attribute_mismatch)
899 << Caller->getDeclName() << Callee->getDeclName() << "streaming";
900 if (auto *NewAttr = Callee->getAttr<ArmNewAttr>())
901 if (NewAttr->isNewZA())
902 CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
903 << Callee->getDeclName();
904}
905
906void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
907 unsigned Index,
908 raw_ostream &Out) const {
909 appendAttributeMangling(Attr->getFeatureStr(Index), Out);
910}
911
912void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
913 raw_ostream &Out) const {
914 if (AttrStr == "default") {
915 Out << ".default";
916 return;
917 }
918
919 Out << "._";
921 AttrStr.split(Features, "+");
922 for (auto &Feat : Features)
923 Feat = Feat.trim();
924
925 llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
926 return LHS.compare(RHS) < 0;
927 });
928
929 llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
930 for (auto &Feat : Features)
931 if (auto Ext = llvm::AArch64::parseArchExtension(Feat))
932 if (UniqueFeats.insert(Ext->Name).second)
933 Out << 'M' << Ext->Name;
934}
935
936std::unique_ptr<TargetCodeGenInfo>
938 AArch64ABIKind Kind) {
939 return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
940}
941
942std::unique_ptr<TargetCodeGenInfo>
944 AArch64ABIKind K) {
945 return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
946}
static bool isStreamingCompatible(const FunctionDecl *F)
Definition: AArch64.cpp:849
static bool isStreaming(const FunctionDecl *F)
Definition: AArch64.cpp:841
TypeInfoChars getTypeInfoInChars(const Type *T) const
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:757
Attr - This represents one attribute.
Definition: Attr.h:42
A fixed int type of a specified bitwidth.
Definition: Type.h:7032
This class is used for builtin types like 'int'.
Definition: Type.h:2771
Kind getKind() const
Definition: Type.h:2813
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition: CharUnits.h:201
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static ABIArgInfo getIgnore()
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
Definition: ABIInfo.h:45
virtual bool allowBFloatArgsAndRet() const
Definition: ABIInfo.h:56
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
Definition: ABIInfo.cpp:42
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate.
Definition: ABIInfo.cpp:61
CodeGen::CGCXXABI & getCXXABI() const
Definition: ABIInfo.cpp:18
ASTContext & getContext() const
Definition: ABIInfo.cpp:20
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
Definition: ABIInfo.cpp:47
virtual void appendAttributeMangling(TargetAttr *Attr, raw_ostream &Out) const
Definition: ABIInfo.cpp:187
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of.
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
Definition: ABIInfo.cpp:51
const TargetInfo & getTarget() const
Definition: ABIInfo.cpp:30
virtual bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const
Definition: ABIInfo.cpp:56
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
static Address invalid()
Definition: Address.h:153
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:305
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition: CGBuilder.h:241
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
llvm::ConstantInt * getSize(CharUnits N)
Definition: CGBuilder.h:99
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:345
RecordArgABI
Specify how one should pass an argument of a record type.
Definition: CGCXXABI.h:150
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:158
CGFunctionInfo - Class to encapsulate the information about a function definition.
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
CanQualType getReturnType() const
MutableArrayRef< ArgInfo > arguments()
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:258
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
llvm::Type * ConvertTypeForMem(QualType T)
const TargetInfo & getTarget() const
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Type * ConvertType(QualType T)
const CGFunctionInfo * CurFnInfo
This class organizes the cross-function state that is used while generating LLVM code.
DiagnosticsEngine & getDiags() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
Definition: CodeGenTypes.h:54
Target specific hooks for defining how a type should be passed or returned from functions with one of...
Definition: ABIInfo.h:128
virtual bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy, unsigned NumElts) const
Returns true if the given vector type is legal from Swift's calling convention perspective.
Definition: ABIInfo.cpp:278
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
Definition: TargetInfo.h:46
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
Definition: TargetInfo.h:193
virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const
Retrieve the address of a function to call immediately before calling objc_retainAutoreleasedReturnVa...
Definition: TargetInfo.h:205
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
Definition: TargetInfo.h:75
virtual void checkFunctionABI(CodeGenModule &CGM, const FunctionDecl *Decl) const
Any further codegen related checks that need to be done on a function signature in a target specific ...
Definition: TargetInfo.h:89
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition: TargetInfo.h:178
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition: TargetInfo.h:122
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
Definition: TargetInfo.h:94
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:85
T * getAttr() const
Definition: DeclBase.h:580
SourceLocation getLocation() const
Definition: DeclBase.h:446
bool hasAttr() const
Definition: DeclBase.h:584
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1547
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Definition: Type.h:5365
Represents a function declaration or definition.
Definition: Decl.h:1971
QualType getReturnType() const
Definition: Decl.h:2755
ArrayRef< ParmVarDecl * > parameters() const
Definition: Decl.h:2684
Represents a prototype with parameter type info, e.g.
Definition: Type.h:4446
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
Definition: Type.h:4884
@ SME_PStateSMEnabledMask
Definition: Type.h:4307
@ SME_PStateSMCompatibleMask
Definition: Type.h:4308
This represents a decl that may have a name.
Definition: Decl.h:249
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition: Decl.h:315
Represents a parameter to a function.
Definition: Decl.h:1761
A (possibly-)qualified type.
Definition: Type.h:738
Encodes a location in the source.
Exposes information about the current target.
Definition: TargetInfo.h:213
virtual ParsedTargetAttr parseTargetAttr(StringRef Str) const
Definition: TargetInfo.cpp:538
virtual bool hasBFloat16Type() const
Determine whether the _BFloat16 type is supported on this target.
Definition: TargetInfo.h:678
virtual bool hasFeature(StringRef Feature) const
Determine whether the given target has the given feature.
Definition: TargetInfo.h:1451
virtual bool validateBranchProtection(StringRef Spec, StringRef Arch, BranchProtectionInfo &BPI, StringRef &Err) const
Determine if this TargetInfo supports the given branch protection specification.
Definition: TargetInfo.h:1427
The base class of the type hierarchy.
Definition: Type.h:1607
bool isVoidType() const
Definition: Type.h:7695
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:7980
bool isVectorType() const
Definition: Type.h:7508
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:7913
QualType getType() const
Definition: Decl.h:717
Represents a GCC generic vector type.
Definition: Type.h:3759
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, const ABIArgInfo &AI)
Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
Definition: ABIInfoImpl.cpp:79
bool isAggregateTypeForABI(QualType T)
std::unique_ptr< TargetCodeGenInfo > createAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind Kind)
Definition: AArch64.cpp:937
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind K)
Definition: AArch64.cpp:943
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1385
The JSON file list parser is used to communicate input to InstallAPI.
@ CPlusPlus
Definition: LangStandard.h:55
const FunctionProtoType * T
unsigned long uint64_t
Definition: Format.h:5394
#define true
Definition: stdbool.h:21
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Contains information gathered from parsing the contents of TargetAttr.
Definition: TargetInfo.h:56
LangOptions::SignReturnAddressScopeKind SignReturnAddr
Definition: TargetInfo.h:1388
LangOptions::SignReturnAddressKeyKind SignKey
Definition: TargetInfo.h:1389
const char * getSignReturnAddrStr() const
Definition: TargetInfo.h:1396