//===- AArch64.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AArch64ABIInfo : public ABIInfo {
  AArch64ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : ABIInfo(CGT), Kind(Kind) {}

private:
  AArch64ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
                                  unsigned CallingConvention) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type, FI.isVariadic(),
                                     FI.getCallingConvention());
  }

  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                          CodeGenFunction &CGF) const;

  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
           : isDarwinPCS()               ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }

  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }
};

class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

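  // DWARF register number 31 is SP on AArch64; exception handling unwinds
  // relative to the stack pointer.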
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const auto *TA = FD->getAttr<TargetAttr>();
    if (TA == nullptr)
      return;

    ParsedTargetAttr Attr =
        CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
    if (Attr.BranchProtection.empty())
      return;

    TargetInfo::BranchProtectionInfo BPI;
    StringRef Error;
    (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                   Attr.CPU, BPI, Error);
    assert(Error.empty());

    auto *Fn = cast<llvm::Function>(GV);
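    // Indexed by LangOptions::SignReturnAddressScopeKind in declaration
    // order: None, NonLeaf, All.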
    static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
    Fn->addFnAttr("sign-return-address",
                  SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);

    if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
      Fn->addFnAttr("sign-return-address-key",
                    BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
                        ? "a_key"
                        : "b_key");
    }

    Fn->addFnAttr("branch-target-enforcement",
                  BPI.BranchTargetEnforcement ? "true" : "false");
  }

  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
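    // The shape being matched is the ACLE ls64 type data512_t, which lowers
    // to a struct wrapping a single [8 x i64] member.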
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return false;
  }
};

class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // namespace

ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

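  // Fixed-length SVE data vectors are coerced to the scalable container type
  // with the same element type (e.g. a fixed vector of float becomes
  // <vscale x 4 x float>), independent of the vector length in use.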
  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    llvm::ScalableVectorType *ResType = nullptr;
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");
    case BuiltinType::SChar:
    case BuiltinType::UChar:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);
      break;
    case BuiltinType::Short:
    case BuiltinType::UShort:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);
      break;
    case BuiltinType::Int:
    case BuiltinType::UInt:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      break;
    case BuiltinType::Long:
    case BuiltinType::ULong:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);
      break;
    case BuiltinType::Half:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);
      break;
    case BuiltinType::Float:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);
      break;
    case BuiltinType::Double:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);
      break;
    case BuiltinType::BFloat16:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
      break;
    }
    return ABIArgInfo::getDirect(ResType);
  }

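  // What remains are plain (non-SVE) vectors the backend cannot take
  // directly; they are passed as an integer or integer-vector container of
  // the same overall size.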
  uint64_t Size = getContext().getTypeSize(Ty);
  // Android promotes <2 x i8> to i16, not i32.
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo
AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
                                     unsigned CallingConvention) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty);

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++
  // mode elsewhere for GNU compatibility.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (IsEmpty || Size == 0) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // GNU C mode. The only argument that gets ignored is an empty one with
    // size 0.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadic;
  // In variadic functions on Windows, all composite types are treated alike,
  // no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the argument alignment to 16, otherwise
    // set it to 8 according to the AAPCS64 document.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(Ty, getContext(), getVMContext());
    }
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return coerceIllegalVector(RetTy);
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadic))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }

    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in lower bits of a 64-bit register for
      // LE, and in higher bits for BE. However, integer types are always
      // returned in lower bits for both LE and BE, and they are not rounded
      // up to 64 bits. We can skip rounding up of composite types for LE, but
      // not for BE, otherwise composite types will be indistinguishable from
      // integer types.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to a multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}
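
// For example, "struct { int a, b; }" (64 bits, 32-bit aligned) falls into
// the Size <= 64 little-endian case above and is returned directly in a
// single i64.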

/// isIllegalVectorType - check whether the vector type is illegal for
/// AArch64 and must be coerced.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is a fixed-length SVE vector. These types are
    // represented as scalable vectors in function args/return and must be
    // coerced from fixed vectors.
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return true;

    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows huge
    // vectors for some reason.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}

bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  // AAPCS64 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout decision. So
  // anything that doesn't affect the data layout also does not affect
  // homogeneity. In particular, zero-length bitfields don't stop a struct
  // being homogeneous.
  return true;
}
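
// A worked example of the control flow emitted below: for va_arg(ap, double),
// IsFPR is true, so the generated code tests __vr_offs. If it is negative, a
// slot remains in the FP/SIMD register save area, the value is loaded from
// __vr_top + __vr_offs, and __vr_offs is advanced by 16; otherwise the
// argument is read from __stack.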

Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
                                       CGF.CurFnInfo->getCallingConvention());
  // Empty records are ignored for parameter passing purposes.
  if (AI.isIgnore()) {
    uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
    CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
    auto *Load = CGF.Builder.CreateLoad(VAListAddr);
    return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };
  //
  // __gr_offs and __vr_offs are negative offsets from __gr_top/__vr_top back
  // into the register save area; they count up toward zero as register slots
  // are consumed.

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for the next call to va_arg on this
  // va_list. The fact that this is done unconditionally reflects the fact
  // that allocating an argument to the stack also uses up all the remaining
  // registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
                   CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find
    // from stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements
    // split and stored 16-bytes apart regardless of size (they're notionally
    // in qN, qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = LoadAddr.withElementType(BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = Tmp.withElementType(MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = BaseAddr.withElementType(MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = OnStackAddr.withElementType(MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
                   TyAlign);

  return ResAddr;
}

Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types. Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
                   CGF.ConvertTypeForMem(Ty), SlotSize);

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign*/ true);
}

Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  bool IsIndirect = false;

  // Composites larger than 16 bytes are passed by reference.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
    IsIndirect = true;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
}