//===- RISCV.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/TargetParser/RISCVTargetParser.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// RISC-V ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
  const int NumArgGPRs;
  const int NumArgFPRs;
  const bool EABI;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

  bool detectVLSCCEligibleStruct(QualType Ty, unsigned ABIVLen,
                                 llvm::Type *&VLSType) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen,
               bool EABI)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen), NumArgGPRs(EABI ? 6 : 8),
        NumArgFPRs(FLen != 0 ? 8 : 0), EABI(EABI) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft, unsigned ABIVLen) const;
  ABIArgInfo classifyReturnType(QualType RetTy, unsigned ABIVLen) const;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  ABIArgInfo extendType(QualType Ty, llvm::Type *CoerceTy = nullptr) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;

  ABIArgInfo coerceVLSVector(QualType Ty, unsigned ABIVLen = 0) const;

  using ABIInfo::appendAttributeMangling;
  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
  llvm::Value *createCoercedLoad(Address SrcAddr, const ABIArgInfo &AI,
                                 CodeGenFunction &CGF) const override;
  void createCoercedStore(llvm::Value *Val, Address DstAddr,
                          const ABIArgInfo &AI, bool DestIsVolatile,
                          CodeGenFunction &CGF) const override;
};
} // end anonymous namespace

void RISCVABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                           unsigned Index,
                                           raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}

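// A sketch of the resulting mangling (derived from the loop below): for the
// attribute string "arch=+v,+zbb", the features are split on ',', sorted,
// stripped of their '+' prefix, and emitted as "._v_zbb".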
void RISCVABIInfo::appendAttributeMangling(StringRef AttrStr,
                                           raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << '.';

  SmallVector<StringRef, 8> Attrs;
  AttrStr.split(Attrs, ';');

  // Only consider the arch string.
  StringRef ArchStr;
  for (auto &Attr : Attrs) {
    if (Attr.starts_with("arch="))
      ArchStr = Attr;
  }

  // Extract the features string.
  SmallVector<StringRef, 8> Features;
  ArchStr.consume_front("arch=");
  ArchStr.split(Features, ',');

  llvm::stable_sort(Features);

  for (auto Feat : Features) {
    Feat.consume_front("+");
    Out << "_" << Feat;
  }
}

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  unsigned ABIVLen;
  switch (FI.getExtInfo().getCC()) {
  default:
    ABIVLen = 0;
    break;
#define CC_VLS_CASE(ABI_VLEN)                                                  \
  case CallingConv::CC_RISCVVLSCall_##ABI_VLEN:                                \
    ABIVLen = ABI_VLEN;                                                        \
    break;
    CC_VLS_CASE(32)
    CC_VLS_CASE(64)
    CC_VLS_CASE(128)
    CC_VLS_CASE(256)
    CC_VLS_CASE(512)
    CC_VLS_CASE(1024)
    CC_VLS_CASE(2048)
    CC_VLS_CASE(4096)
    CC_VLS_CASE(8192)
    CC_VLS_CASE(16384)
    CC_VLS_CASE(32768)
    CC_VLS_CASE(65536)
#undef CC_VLS_CASE
  }
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy, ABIVLen);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirect, or if the type is a scalar greater than 2*XLen in
  // size and not a complex type with elements <= FLen; e.g. fp128 is passed
  // direct in LLVM IR, relying on the backend lowering code to rewrite the
  // argument list and pass indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = NumArgFPRs;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info = classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft,
                                        ArgFPRsLeft, ABIVLen);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
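// For example (illustrative, assuming FLen >= 64): struct { double d; int i; }
// flattens to fp+int and remains a candidate, while struct { int a; int b; }
// is rejected by the int+int check below.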
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Handling of half
    // precision values has been specified in the ABI, so don't block those.
    if (IsFloat && Size > FLen)
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getZExtSize();
    QualType EltTy = ATy->getElementType();
    // Non-zero-length arrays of empty records make the struct ineligible for
    // the FP calling convention in C++.
    if (const auto *RTy = EltTy->getAsCanonical<RecordType>()) {
      if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getOriginalDecl()) &&
          isEmptyRecord(getContext(), EltTy, true, true))
        return false;
    }
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAsCanonical<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true, true))
      return true;
    const RecordDecl *RD = RTy->getOriginalDecl()->getDefinitionOrSelf();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
        const auto *BDecl = B.getType()->castAsCXXRecordDecl();
        CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
        bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
                                                  Field1Ty, Field1Off, Field2Ty,
                                                  Field2Off);
        if (!Ret)
          return false;
      }
    }
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue();
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
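// For example, struct { float f; float g; } needs two FPRs, while
// struct { int i; float f; } needs one GPR and one FPR.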
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  if (!Field1Ty)
    return false;
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return true;
}

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
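// For example, a float at offset 0 followed by a double at offset 8 yields
// the unpacked coerceToType { float, double }: no explicit padding element is
// required because the natural (unpacked) struct layout already places the
// double at offset 8.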
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
  CharUnits Field1End = Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}

bool RISCVABIInfo::detectVLSCCEligibleStruct(QualType Ty, unsigned ABIVLen,
                                             llvm::Type *&VLSType) const {
  // No riscv_vls_cc attribute.
  if (ABIVLen == 0)
    return false;

  // A struct that is legal for the VLS calling convention should fulfill the
  // following rules:
  // 1. Its elements are either homogeneous fixed-length vectors or a single
  //    fixed-length vector array.
  // 2. The number of struct elements or array elements is greater than or
  //    equal to 1 and less than or equal to 8.
  // 3. The total number of vector registers needed does not exceed 8.
  //
  // Examples: Assume ABI_VLEN = 128.
  // These are legal structs:
  // a. Structs with 1~8 "same" fixed-length vectors, e.g.
  // struct {
  //   __attribute__((vector_size(16))) int a;
  //   __attribute__((vector_size(16))) int b;
  // }
  //
  // b. Structs with a "single" fixed-length vector array of length 1~8, e.g.
  // struct {
  //   __attribute__((vector_size(16))) int a[3];
  // }
  //
  // These are illegal structs:
  // a. Structs with 9 fixed-length vectors, e.g.
  // struct {
  //   __attribute__((vector_size(16))) int a;
  //   __attribute__((vector_size(16))) int b;
  //   __attribute__((vector_size(16))) int c;
  //   __attribute__((vector_size(16))) int d;
  //   __attribute__((vector_size(16))) int e;
  //   __attribute__((vector_size(16))) int f;
  //   __attribute__((vector_size(16))) int g;
  //   __attribute__((vector_size(16))) int h;
  //   __attribute__((vector_size(16))) int i;
  // }
  //
  // b. Structs with "multiple" fixed-length vector arrays, e.g.
  // struct {
  //   __attribute__((vector_size(16))) int a[2];
  //   __attribute__((vector_size(16))) int b[2];
  // }
  //
  // c. Structs where the vector registers needed exceed 8, e.g.
  // struct {
  //   // Registers needed for a single fixed-length element:
  //   // 64 * 8 / ABI_VLEN = 4
  //   __attribute__((vector_size(64))) int a;
  //   __attribute__((vector_size(64))) int b;
  //   __attribute__((vector_size(64))) int c;
  //   __attribute__((vector_size(64))) int d;
  // }
  //
  // 1. A struct of 1 fixed-length vector is passed as a scalable vector.
  // 2. A struct of >1 fixed-length vectors is passed as a vector tuple.
  // 3. A struct of an array with 1 element of fixed-length vectors is passed
  //    as a scalable vector.
  // 4. A struct of an array with >1 elements of fixed-length vectors is
  //    passed as a vector tuple.
  // 5. Otherwise, pass the struct indirectly.

  llvm::StructType *STy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!STy)
    return false;

  unsigned NumElts = STy->getStructNumElements();
  if (NumElts > 8)
    return false;

  auto *FirstEltTy = STy->getElementType(0);
  if (!STy->containsHomogeneousTypes())
    return false;

  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(FirstEltTy)) {
    // Only a struct of a single array is accepted.
    if (NumElts != 1)
      return false;
    FirstEltTy = ArrayTy->getArrayElementType();
    NumElts = ArrayTy->getNumElements();
  }

  auto *FixedVecTy = dyn_cast<llvm::FixedVectorType>(FirstEltTy);
  if (!FixedVecTy)
    return false;

  // Check registers needed <= 8.
  if (NumElts * llvm::divideCeil(
                    FixedVecTy->getNumElements() *
                        FixedVecTy->getElementType()->getScalarSizeInBits(),
                    ABIVLen) >
      8)
    return false;

  // Turn them into a scalable vector type or a vector tuple type if legal.
  if (NumElts == 1) {
    // Handle a single fixed-length vector.
    VLSType = llvm::ScalableVectorType::get(
        FixedVecTy->getElementType(),
        llvm::divideCeil(FixedVecTy->getNumElements() *
                             llvm::RISCV::RVVBitsPerBlock,
                         ABIVLen));
    return true;
  }

  // LMUL
  // = fixed-length vector size / ABIVLen
  // = 8 * I8EltCount / RVVBitsPerBlock
  // =>
  // I8EltCount
  // = (fixed-length vector size * RVVBitsPerBlock) / (ABIVLen * 8)
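  // Worked example: with ABIVLen = 128 and <4 x i32> elements (128 fixed
  // bits), I8EltCount = divideCeil(4 * 32 * RVVBitsPerBlock, 128 * 8) = 8,
  // so each tuple element is <vscale x 8 x i8>.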
  unsigned I8EltCount =
      llvm::divideCeil(FixedVecTy->getNumElements() *
                           FixedVecTy->getElementType()->getScalarSizeInBits() *
                           llvm::RISCV::RVVBitsPerBlock,
                       ABIVLen * 8);
  VLSType = llvm::TargetExtType::get(
      getVMContext(), "riscv.vector.tuple",
      llvm::ScalableVectorType::get(llvm::Type::getInt8Ty(getVMContext()),
                                    I8EltCount),
      NumElts);
  return true;
}

// Fixed-length RVV vectors are represented as scalable vectors in function
// args/return and must be coerced from fixed vectors.
ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty, unsigned ABIVLen) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

  auto VScale = getContext().getTargetInfo().getVScaleRange(
      getContext().getLangOpts(), TargetInfo::ArmStreamingKind::NotStreaming);

  unsigned NumElts = VT->getNumElements();
  llvm::Type *EltType = llvm::Type::getInt1Ty(getVMContext());
  switch (VT->getVectorKind()) {
  case VectorKind::RVVFixedLengthMask_1:
    break;
  case VectorKind::RVVFixedLengthMask_2:
    NumElts *= 2;
    break;
  case VectorKind::RVVFixedLengthMask_4:
    NumElts *= 4;
    break;
  case VectorKind::RVVFixedLengthMask:
    NumElts *= 8;
    break;
  default:
    assert((VT->getVectorKind() == VectorKind::Generic ||
            VT->getVectorKind() == VectorKind::RVVFixedLengthData) &&
           "Unexpected vector kind");
    EltType = CGT.ConvertType(VT->getElementType());
  }

  llvm::ScalableVectorType *ResType;

  if (ABIVLen == 0) {
    // The MinNumElts is simplified from the equation:
    // NumElts / VScale =
    //  (EltSize * NumElts / (VScale * RVVBitsPerBlock))
    //    * (RVVBitsPerBlock / EltSize)
    ResType = llvm::ScalableVectorType::get(EltType, NumElts / VScale->first);
  } else {
    // Check registers needed <= 8.
    if ((EltType->getScalarSizeInBits() * NumElts / ABIVLen) > 8)
      return getNaturalAlignIndirect(
          Ty, /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(),
          /*ByVal=*/false);

    // Generic vector
    // The number of elements needs to be at least 1.
    ResType = llvm::ScalableVectorType::get(
        EltType,
        llvm::divideCeil(NumElts * llvm::RISCV::RVVBitsPerBlock, ABIVLen));

    // If the corresponding extension is not supported, just make it an i8
    // vector with the same LMUL.
    const TargetInfo &TI = getContext().getTargetInfo();
    if ((EltType->isHalfTy() && !TI.hasFeature("zvfhmin")) ||
        (EltType->isBFloatTy() && !TI.hasFeature("zvfbfmin")) ||
        (EltType->isFloatTy() && !TI.hasFeature("zve32f")) ||
        (EltType->isDoubleTy() && !TI.hasFeature("zve64d")) ||
        (EltType->isIntegerTy(64) && !TI.hasFeature("zve64x")) ||
        EltType->isIntegerTy(128)) {
      // The number of elements needs to be at least 1.
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()),
          llvm::divideCeil(EltType->getScalarSizeInBits() * NumElts *
                               llvm::RISCV::RVVBitsPerBlock,
                           8 * ABIVLen));
    }
  }

  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft,
                                              unsigned ABIVLen) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(
        Ty, /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(),
        /*ByVal=*/RAA == CGCXXABI::RAA_DirectInMemory);
  }

  uint64_t Size = getContext().getTypeSize(Ty);

  // Ignore empty structs/unions whose size is zero. According to the calling
  // convention empty structs/unions are required to be sized types in C++.
  if (isEmptyRecord(getContext(), Ty, true) && Size == 0)
    return ABIArgInfo::getIgnore();

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs = 0;
    int NeededArgFPRs = 0;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  if (IsFixed && Ty->isStructureOrClassType()) {
    llvm::Type *VLSType = nullptr;
    if (detectVLSCCEligibleStruct(Ty, ABIVLen, VLSType))
      return ABIArgInfo::getTargetSpecific(VLSType);
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  // TODO: To be compatible with GCC's behaviors, we don't align registers
  // currently if we are using ILP32E calling convention. This behavior may be
  // changed when RV32E/ILP32E is ratified.
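  // For example, a variadic 'long long' on RV32 passed while an odd number of
  // GPRs remain consumes three GPRs: one to restore even register alignment
  // and two for the value itself (the ArgGPRsLeft % 2 term below).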
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (EABI && XLen == 32 ? 0 : (ArgGPRsLeft % 2));
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const auto *ED = Ty->getAsEnumDecl())
      Ty = ED->getIntegerType();

    // All integral types are promoted to XLen width.
    if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
      return extendType(Ty, CGT.ConvertType(Ty));
    }

    if (const auto *EIT = Ty->getAs<BitIntType>()) {
      if (EIT->getNumBits() < XLen)
        return extendType(Ty, CGT.ConvertType(Ty));
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(
            Ty, /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(),
            /*ByVal=*/false);
    }

    return ABIArgInfo::getDirect();
  }

  // TODO: _BitInt is not handled yet in the VLS calling convention since the
  // _BitInt ABI is also not merged yet in RISC-V:
  // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/pull/419
  if (const VectorType *VT = Ty->getAs<VectorType>();
      VT && !VT->getElementType()->isBitIntType()) {
    if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
        VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
        VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
        VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
        VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
      return coerceVLSVector(Ty);
    if (VT->getVectorKind() == VectorKind::Generic && ABIVLen != 0)
      // A generic vector without riscv_vls_cc should fall through and be
      // passed by reference.
      return coerceVLSVector(Ty, ABIVLen);
  }

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is
    // required.
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(
      Ty, /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(),
      /*ByVal=*/false);
}

ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy,
                                            unsigned ABIVLen) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft, ArgFPRsLeft,
                              ABIVLen);
}

RValue RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                               QualType Ty, AggValueSlot Slot) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Slot.asRValue();

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  // TODO: To be compatible with GCC's behaviors, we force arguments with
  // 2×XLEN-bit alignment and size at most 2×XLEN bits like `long long`,
  // `unsigned long long` and `double` to have 4-byte alignment. This
  // behavior may be changed when RV32E/ILP32E is ratified.
  if (EABI && XLen == 32)
    TInfo.Align = std::min(TInfo.Align, CharUnits::fromQuantity(4));

  // Arguments bigger than 2*XLen bytes are passed indirectly.
  bool IsIndirect = TInfo.Width > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo, SlotSize,
                          /*AllowHigherAlign=*/true, Slot);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty, llvm::Type *CoerceTy) const {
  int TySize = getContext().getTypeSize(Ty);
  // RV64 ABI requires unsigned 32 bit integers to be sign extended.
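  // e.g. a 32-bit 'unsigned int' argument on RV64 is passed sign-extended to
  // 64 bits, matching the sign-extended register form that RV64's 32-bit
  // arithmetic instructions (addiw etc.) produce and expect.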
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty, CoerceTy);
  return ABIArgInfo::getExtend(Ty, CoerceTy);
}

llvm::Value *RISCVABIInfo::createCoercedLoad(Address Src, const ABIArgInfo &AI,
                                             CodeGenFunction &CGF) const {
  llvm::Type *Ty = AI.getCoerceToType();
  llvm::Type *SrcTy = Src.getElementType();
  llvm::StructType *SrcSTy = cast<llvm::StructType>(SrcTy);
  assert((Ty->isScalableTy() || Ty->isTargetExtTy()) &&
         "Only scalable vector type and vector tuple type are allowed for load "
         "type.");
  if (llvm::TargetExtType *TupTy = dyn_cast<llvm::TargetExtType>(Ty)) {
    // In the RISC-V VLS calling convention, a struct of fixed vectors or a
    // struct of an array of fixed vectors of length >1 might be lowered using
    // a vector tuple type, which we consider a valid load, e.g.
    // struct i32x4x2 {
    //   __attribute__((vector_size(16))) int i;
    //   __attribute__((vector_size(16))) int j;
    // };
    // or
    // struct i32x4x2 {
    //   __attribute__((vector_size(16))) int i[2];
    // };
    // is lowered to target("riscv.vector.tuple", <vscale x 8 x i8>, 2)
    // when ABI_VLEN = 128 bits; see
    // clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
    // for more information.
    assert(TupTy->getName() == "riscv.vector.tuple");
    llvm::Type *EltTy = TupTy->getTypeParameter(0);
    unsigned NumElts = TupTy->getIntParameter(0);

    if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(SrcSTy->getElementType(0)))
      Src = Src.withElementType(ArrayTy);

    // Perform extract element and load.
    llvm::Value *TupleVal = llvm::PoisonValue::get(Ty);
    auto *Load = CGF.Builder.CreateLoad(Src);
    for (unsigned i = 0; i < NumElts; ++i) {
      // Extract from the struct.
      llvm::Value *ExtractFromLoad = CGF.Builder.CreateExtractValue(Load, i);
      // The element in a vector tuple type is always i8, so we need to cast
      // back to its original element type.
      EltTy =
          cast<llvm::ScalableVectorType>(llvm::VectorType::getWithSizeAndScalar(
              cast<llvm::VectorType>(EltTy), ExtractFromLoad->getType()));
      llvm::Value *VectorVal = llvm::PoisonValue::get(EltTy);
      // Insert into the scalable vector.
      VectorVal = CGF.Builder.CreateInsertVector(
          EltTy, VectorVal, ExtractFromLoad, uint64_t(0), "cast.scalable");
      // Insert the scalable vector into the vector tuple.
      llvm::Value *Idx = CGF.Builder.getInt32(i);
      TupleVal =
          CGF.Builder.CreateIntrinsic(llvm::Intrinsic::riscv_tuple_insert,
                                      {Ty, EltTy}, {TupleVal, VectorVal, Idx});
    }
    return TupleVal;
  }

  // In the RISC-V VLS calling convention, a struct of a fixed vector or a
  // struct of a fixed vector array of length 1 might be lowered using a
  // scalable vector, which we consider a valid load, e.g.
  // struct i32x4 {
  //   __attribute__((vector_size(16))) int i;
  // };
  // or
  // struct i32x4 {
  //   __attribute__((vector_size(16))) int i[1];
  // };
  // is lowered to <vscale x 2 x i32>
  // when ABI_VLEN = 128 bits; see
  // clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
  // for more information.
  auto *ScalableDstTy = cast<llvm::ScalableVectorType>(Ty);
  SrcTy = SrcSTy->getElementType(0);
  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(SrcTy))
    SrcTy = ArrayTy->getElementType();
  Src = Src.withElementType(SrcTy);
  [[maybe_unused]] auto *FixedSrcTy = cast<llvm::FixedVectorType>(SrcTy);
  assert(ScalableDstTy->getElementType() == FixedSrcTy->getElementType());
  auto *Load = CGF.Builder.CreateLoad(Src);
  auto *VectorVal = llvm::PoisonValue::get(ScalableDstTy);
  llvm::Value *Result = CGF.Builder.CreateInsertVector(
      ScalableDstTy, VectorVal, Load, uint64_t(0), "cast.scalable");
  return Result;
}

void RISCVABIInfo::createCoercedStore(llvm::Value *Val, Address Dst,
                                      const ABIArgInfo &AI, bool DestIsVolatile,
                                      CodeGenFunction &CGF) const {
  llvm::Type *SrcTy = Val->getType();
  llvm::StructType *DstSTy = cast<llvm::StructType>(Dst.getElementType());
  assert((SrcTy->isScalableTy() || SrcTy->isTargetExtTy()) &&
         "Only scalable vector type and vector tuple type are allowed for "
         "store value.");
  if (llvm::TargetExtType *TupTy = dyn_cast<llvm::TargetExtType>(SrcTy)) {
    // In the RISC-V VLS calling convention, a struct of fixed vectors or a
    // struct of an array of fixed vectors of length >1 might be lowered using
    // a vector tuple type, which we consider a valid store, e.g.
    // struct i32x4x2 {
    //   __attribute__((vector_size(16))) int i;
    //   __attribute__((vector_size(16))) int j;
    // };
    // or
    // struct i32x4x2 {
    //   __attribute__((vector_size(16))) int i[2];
    // };
    // is lowered to target("riscv.vector.tuple", <vscale x 8 x i8>, 2)
    // when ABI_VLEN = 128 bits; see
    // clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
    // for more information.
    assert(TupTy->getName() == "riscv.vector.tuple");
    llvm::Type *EltTy = TupTy->getTypeParameter(0);
    unsigned NumElts = TupTy->getIntParameter(0);

    llvm::Type *FixedVecTy = DstSTy->getElementType(0);
    if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(DstSTy->getElementType(0))) {
      Dst = Dst.withElementType(ArrayTy);
      FixedVecTy = ArrayTy->getArrayElementType();
    }

    // Perform extract element and store.
    for (unsigned i = 0; i < NumElts; ++i) {
      // The element in a vector tuple type is always i8, so we need to cast
      // back to its original element type.
      EltTy =
          cast<llvm::ScalableVectorType>(llvm::VectorType::getWithSizeAndScalar(
              cast<llvm::VectorType>(EltTy), FixedVecTy));
      // Extract the scalable vector from the tuple.
      llvm::Value *Idx = CGF.Builder.getInt32(i);
      auto *TupleElement = CGF.Builder.CreateIntrinsic(
          llvm::Intrinsic::riscv_tuple_extract, {EltTy, TupTy}, {Val, Idx});

      // Extract the fixed vector from the scalable vector.
      auto *ExtractVec = CGF.Builder.CreateExtractVector(
          FixedVecTy, TupleElement, uint64_t(0));
      // Store the fixed vector to the corresponding address.
      Address EltPtr = Address::invalid();
      if (Dst.getElementType()->isStructTy())
        EltPtr = CGF.Builder.CreateStructGEP(Dst, i);
      else
        EltPtr = CGF.Builder.CreateConstArrayGEP(Dst, i);
      auto *I = CGF.Builder.CreateStore(ExtractVec, EltPtr, DestIsVolatile);
      CGF.addInstToCurrentSourceAtom(I, ExtractVec);
    }
    return;
  }

  // In the RISC-V VLS calling convention, a struct of a fixed vector or a
  // struct of a fixed vector array of length 1 might be lowered using a
  // scalable vector, which we consider a valid store, e.g.
  // struct i32x4 {
  //   __attribute__((vector_size(16))) int i;
  // };
  // or
  // struct i32x4 {
  //   __attribute__((vector_size(16))) int i[1];
  // };
  // is lowered to <vscale x 2 x i32>
  // when ABI_VLEN = 128 bits; see
  // clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
  // for more information.
  llvm::Type *EltTy = DstSTy->getElementType(0);
  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(EltTy)) {
    assert(ArrayTy->getNumElements() == 1);
    EltTy = ArrayTy->getElementType();
  }
  auto *Coerced = CGF.Builder.CreateExtractVector(
      cast<llvm::FixedVectorType>(EltTy), Val, uint64_t(0));
  auto *I = CGF.Builder.CreateStore(Coerced, Dst, DestIsVolatile);
  CGF.addInstToCurrentSourceAtom(I, Val);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen, bool EABI)
      : TargetCodeGenInfo(
            std::make_unique<RISCVABIInfo>(CGT, XLen, FLen, EABI)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    auto *Fn = cast<llvm::Function>(GV);

    if (CGM.getCodeGenOpts().CFProtectionReturn)
      Fn->addFnAttr("hw-shadow-stack");

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    StringRef Kind = "machine";
    bool HasSiFiveCLICPreemptible = false;
    bool HasSiFiveCLICStackSwap = false;
    for (RISCVInterruptAttr::InterruptType type : Attr->interrupt()) {
      switch (type) {
      case RISCVInterruptAttr::machine:
        // Do not update `Kind` because `Kind` is already "machine", or the
        // kinds also contain SiFive types which need to be applied.
        break;
      case RISCVInterruptAttr::supervisor:
        Kind = "supervisor";
        break;
      case RISCVInterruptAttr::rnmi:
        Kind = "rnmi";
        break;
      case RISCVInterruptAttr::qcinest:
        Kind = "qci-nest";
        break;
      case RISCVInterruptAttr::qcinonest:
        Kind = "qci-nonest";
        break;
      // There are three different LLVM IR attribute values for SiFive CLIC
      // interrupt kinds, one for each kind and one extra for their
      // combination.
      case RISCVInterruptAttr::SiFiveCLICPreemptible: {
        HasSiFiveCLICPreemptible = true;
        Kind = HasSiFiveCLICStackSwap ? "SiFive-CLIC-preemptible-stack-swap"
                                      : "SiFive-CLIC-preemptible";
        break;
      }
      case RISCVInterruptAttr::SiFiveCLICStackSwap: {
        HasSiFiveCLICStackSwap = true;
        Kind = HasSiFiveCLICPreemptible ? "SiFive-CLIC-preemptible-stack-swap"
                                        : "SiFive-CLIC-stack-swap";
        break;
      }
      }
    }

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen,
                                      unsigned FLen, bool EABI) {
  return std::make_unique<RISCVTargetCodeGenInfo>(CGM.getTypes(), XLen, FLen,
                                                  EABI);
}