10#include "TargetInfo.h"
11#include "llvm/IR/IntrinsicsRISCV.h"
12#include "llvm/TargetParser/RISCVTargetParser.h"
// NOTE(review): this listing is a damaged extraction — original source line
// numbers are fused into the text and many lines are missing. Comments below
// describe only what is visible; do not treat this text as compilable.
//
// Member declarations of RISCVABIInfo: clang's RISC-V implementation of the
// ABI lowering (argument/return classification) used during IR generation.
//
// Recursive helper: scans Ty (starting at byte offset CurOff) for the
// one/two-field pattern eligible for the hardware floating-point calling
// convention, reporting flattened field types/offsets via the out-params.
33 bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
34 llvm::Type *&Field1Ty,
36 llvm::Type *&Field2Ty,
37 CharUnits &Field2Off)
const;
// Checks whether Ty can be passed by the VLS (fixed-length vector) calling
// convention for the given ABIVLen; on success VLSType receives the
// scalable/tuple IR type used for the coercion.
39 bool detectVLSCCEligibleStruct(QualType Ty,
unsigned ABIVLen,
40 llvm::Type *&VLSType)
const;
// Ctor: EABI targets get 6 argument GPRs, others 8; FPR count is 8 only when
// the target has hardware FP (FLen != 0).
// NOTE(review): the parameter list is truncated by the extraction (the EABI
// parameter line is missing) — verify against the original source.
43 RISCVABIInfo(CodeGen::CodeGenTypes &CGT,
unsigned XLen,
unsigned FLen,
45 : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen), NumArgGPRs(EABI ? 6 : 8),
46 NumArgFPRs(FLen != 0 ? 8 : 0), EABI(EABI) {}
// Entry point: classifies the return type and every argument of FI.
50 void computeInfo(CGFunctionInfo &FI)
const override;
// (Truncated declaration — presumably classifyArgumentType; the leading
// parameters are missing from this extraction.)
53 int &ArgFPRsLeft,
unsigned ABIVLen)
const;
56 RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
57 AggValueSlot Slot)
const override;
// Produces a sign/zero-extended ABIArgInfo for small integer-like types.
59 ABIArgInfo extendType(QualType Ty, llvm::Type *CoerceTy =
nullptr)
const;
// Wrapper around the helper above; also reports how many GPRs/FPRs the
// flattened representation would consume.
61 bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
62 CharUnits &Field1Off, llvm::Type *&Field2Ty,
63 CharUnits &Field2Off,
int &NeededArgGPRs,
64 int &NeededArgFPRs)
const;
// Builds the coerce-and-expand ABIArgInfo (padded struct) for an
// FPCC-eligible aggregate.
65 ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
68 CharUnits Field2Off)
const;
// Coerces a fixed-length vector type to a scalable vector for passing;
// ABIVLen == 0 appears to mean "not the VLS calling convention".
70 ABIArgInfo coerceVLSVector(QualType Ty,
unsigned ABIVLen = 0)
const;
// target_clones mangling hooks (see definitions below).
73 void appendAttributeMangling(TargetClonesAttr *Attr,
unsigned Index,
74 raw_ostream &Out)
const override;
75 void appendAttributeMangling(StringRef AttrStr,
76 raw_ostream &Out)
const override;
// Custom coerced load/store used for scalable-vector and riscv.vector.tuple
// coercions (see definitions below).
77 llvm::Value *createCoercedLoad(Address SrcAddr,
const ABIArgInfo &AI,
78 CodeGenFunction &CGF)
const override;
79 void createCoercedStore(llvm::Value *Val, Address DstAddr,
80 const ABIArgInfo &AI,
bool DestIsVolatile,
81 CodeGenFunction &CGF)
const override;
// Mangles one target_clones variant by delegating to the StringRef overload
// with the feature string selected by Index.
// NOTE(review): extraction is incomplete here — the `unsigned Index`
// parameter line and the closing brace are missing from this listing.
85void RISCVABIInfo::appendAttributeMangling(TargetClonesAttr *
Attr,
87 raw_ostream &Out)
const {
88 appendAttributeMangling(Attr->getFeatureStr(Index), Out);
// Appends the mangling suffix for a target_clones/target_version feature
// string: "default" is special-cased, otherwise the ';'-separated attributes
// are scanned, an "arch=" clause is split into ','-separated features, the
// features are stable-sorted, and each (with any leading '+' dropped) is
// emitted. NOTE(review): many original lines are missing from this
// extraction (the emission statements and closing braces are not visible).
91void RISCVABIInfo::appendAttributeMangling(StringRef AttrStr,
92 raw_ostream &Out)
const {
93 if (AttrStr ==
"default") {
100 SmallVector<StringRef, 8> Attrs;
101 AttrStr.split(Attrs,
';');
105 for (
auto &Attr : Attrs) {
106 if (Attr.starts_with(
"arch="))
111 SmallVector<StringRef, 8> Features;
112 ArchStr.consume_front(
"arch=");
113 ArchStr.split(Features,
',');
// Sorting makes the mangled name independent of the order the user wrote
// the features in.
115 llvm::stable_sort(Features);
117 for (
auto Feat : Features) {
118 Feat.consume_front(
"+");
// Classifies the return type and all arguments of FI. A complex return whose
// element exceeds FLen, or an aggregate larger than 2*XLen (visible in the
// surviving lines), is returned indirectly; an indirect return consumes one
// argument GPR. NOTE(review): this extraction is missing most of the body
// (the CC_VLS_CASE macro use, the classification loop body, and all closing
// braces) — comments cover only the visible fragments.
123void RISCVABIInfo::computeInfo(CGFunctionInfo &FI)
const {
// Maps each fixed-VLEN RISC-V VLS calling convention to its ABIVLen value.
129#define CC_VLS_CASE(ABI_VLEN) \
130 case CallingConv::CC_RISCVVLSCall_##ABI_VLEN: \
131 ABIVLen = ABI_VLEN; \
158 getContext().getTypeSize(RetTy) > (2 * XLen)) {
160 QualType EltTy = RetTy->
castAs<ComplexType>()->getElementType();
161 IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
164 IsRetIndirect =
true;
// An indirect return passes a hidden sret pointer in the first GPR.
168 int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
169 int ArgFPRsLeft = NumArgFPRs;
174 bool IsFixed = ArgNum < NumFixedArgs;
176 ArgFPRsLeft, ABIVLen);
// Recursively flattens Ty looking for the at-most-two-field shape eligible
// for the hardware FP calling convention: scalars wider than XLen (int) or
// FLen (float) disqualify, two integer fields disqualify, complex types
// expand to two identical float fields, and arrays/bases/record fields are
// recursed into at their computed offsets. Returns true iff at least one
// field was recorded (Field1Ty != nullptr). NOTE(review): many original
// lines (early returns, offset bookkeeping, closing braces) are missing
// from this extraction.
185bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
186 llvm::Type *&Field1Ty,
187 CharUnits &Field1Off,
188 llvm::Type *&Field2Ty,
189 CharUnits &Field2Off)
const {
// Scalar case: reject ints wider than XLen / floats wider than FLen, and a
// second integer field when the first field is already an integer.
193 if (IsInt || IsFloat) {
195 if (IsInt && Size > XLen)
199 if (IsFloat && Size > FLen)
203 if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
206 Field1Ty = CGT.ConvertType(Ty);
211 Field2Ty = CGT.ConvertType(Ty);
// _Complex expands to two elements of the element type; eligible only when
// the element fits in an FPR.
218 if (
auto CTy = Ty->
getAs<ComplexType>()) {
221 QualType EltTy = CTy->getElementType();
222 if (getContext().getTypeSize(EltTy) > FLen)
224 Field1Ty = CGT.ConvertType(EltTy);
227 Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
// Constant arrays: recurse once per element at successive offsets.
231 if (
const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
232 uint64_t ArraySize = ATy->getZExtSize();
233 QualType EltTy = ATy->getElementType();
241 CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
242 for (uint64_t i = 0; i < ArraySize; ++i) {
243 bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
244 Field1Off, Field2Ty, Field2Off);
// Records: recurse into C++ bases at their layout offsets, then into each
// field at its bit offset from the record layout.
259 const RecordDecl *RD = RTy->getOriginalDecl()->getDefinitionOrSelf();
263 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
265 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
266 for (
const CXXBaseSpecifier &B : CXXRD->bases()) {
267 const auto *BDecl = B.getType()->castAsCXXRecordDecl();
269 bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
270 Field1Ty, Field1Off, Field2Ty,
276 int ZeroWidthBitFieldCount = 0;
277 for (
const FieldDecl *FD : RD->
fields()) {
279 QualType QTy = FD->getType();
280 if (FD->isBitField()) {
281 unsigned BitWidth = FD->getBitWidthValue();
// A bit-field of an over-wide type is treated as an XLen-sized integer when
// its declared width fits in XLen.
284 if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
285 QTy = getContext().getIntTypeForBitwidth(XLen,
false);
287 ZeroWidthBitFieldCount++;
292 bool Ret = detectFPCCEligibleStructHelper(
293 QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
294 Field1Ty, Field1Off, Field2Ty, Field2Off);
// Zero-width bit-fields disqualify a two-field candidate (visible check);
// the action taken is on a missing line — confirm against the original.
301 if (Field2Ty && ZeroWidthBitFieldCount > 0)
304 return Field1Ty !=
nullptr;
// Public wrapper over the helper: runs the flatten scan and then derives the
// register demand — a lone non-FP field disqualifies (visible check), and
// each FP field presumably counts against FPRs, non-FP against GPRs.
// NOTE(review): the lines that set NeededArgGPRs/NeededArgFPRs and the
// return are missing from this extraction — verify against the original.
314bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
315 CharUnits &Field1Off,
316 llvm::Type *&Field2Ty,
317 CharUnits &Field2Off,
319 int &NeededArgFPRs)
const {
324 bool IsCandidate = detectFPCCEligibleStructHelper(
329 if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
333 if (Field1Ty && Field1Ty->isFloatingPointTy())
337 if (Field2Ty && Field2Ty->isFloatingPointTy())
// Builds the coerce-and-expand struct for a one- or two-field FP-eligible
// aggregate: i8-array padding is inserted before Field1 when it is not at
// offset zero and between the fields when the natural (unpacked) layout
// would place Field2 earlier than its real offset. NOTE(review): the
// single-field early return, the IsPacked computation, and the final
// ABIArgInfo::getCoerceAndExpand call sit partly on lines missing from this
// extraction.
347ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
348 llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
349 CharUnits Field2Off)
const {
350 SmallVector<llvm::Type *, 3> CoerceElts;
351 SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
// Leading padding so Field1 lands at its real byte offset.
353 CoerceElts.push_back(llvm::ArrayType::get(
354 llvm::Type::getInt8Ty(getVMContext()), Field1Off.
getQuantity()));
356 CoerceElts.push_back(Field1Ty);
357 UnpaddedCoerceElts.push_back(Field1Ty);
361 llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.
isZero()),
362 UnpaddedCoerceElts[0]);
365 CharUnits Field2Align =
367 CharUnits Field1End = Field1Off +
// Where Field2 would fall with natural alignment and no packing; extra
// explicit padding is only needed when the real offset is further out.
369 CharUnits Field2OffNoPadNoPack = Field1End.
alignTo(Field2Align);
372 if (Field2Off > Field2OffNoPadNoPack)
373 Padding = Field2Off - Field2OffNoPadNoPack;
374 else if (Field2Off != Field2Align && Field2Off > Field1End)
375 Padding = Field2Off - Field1End;
380 CoerceElts.push_back(llvm::ArrayType::get(
381 llvm::Type::getInt8Ty(getVMContext()), Padding.
getQuantity()));
383 CoerceElts.push_back(Field2Ty);
384 UnpaddedCoerceElts.push_back(Field2Ty);
387 llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
388 auto UnpaddedCoerceToType =
389 llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);
// Determines whether Ty qualifies for the VLS (fixed-VLEN vector) calling
// convention: the converted IR struct must be homogeneous, possibly wrapping
// a single array of fixed vectors; a single element becomes one scalable
// vector, multiple elements become a riscv.vector.tuple of i8 vectors.
// NOTE(review): the size-limit guard (vs. ABIVLen) and several early returns
// are on lines missing from this extraction — confirm against the original.
394bool RISCVABIInfo::detectVLSCCEligibleStruct(QualType Ty,
unsigned ABIVLen,
395 llvm::Type *&VLSType)
const {
457 llvm::StructType *STy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
461 unsigned NumElts = STy->getStructNumElements();
465 auto *FirstEltTy = STy->getElementType(0);
466 if (!STy->containsHomogeneousTypes())
// A single-member struct holding an array of vectors is unwrapped so the
// array elements count as the tuple elements.
469 if (
auto *ArrayTy = dyn_cast<llvm::ArrayType>(FirstEltTy)) {
473 FirstEltTy = ArrayTy->getArrayElementType();
474 NumElts = ArrayTy->getNumElements();
477 auto *FixedVecTy = dyn_cast<llvm::FixedVectorType>(FirstEltTy);
// Total size check (the comparison target is on a missing line).
482 if (NumElts * llvm::divideCeil(
483 FixedVecTy->getNumElements() *
484 FixedVecTy->getElementType()->getScalarSizeInBits(),
// One element: coerce directly to an equivalent scalable vector.
492 VLSType = llvm::ScalableVectorType::get(
493 FixedVecTy->getElementType(),
494 llvm::divideCeil(FixedVecTy->getNumElements() *
495 llvm::RISCV::RVVBitsPerBlock,
// Several elements: coerce to a riscv.vector.tuple whose field is a
// scalable i8 vector sized from the element bit width.
506 unsigned I8EltCount =
507 llvm::divideCeil(FixedVecTy->getNumElements() *
508 FixedVecTy->getElementType()->getScalarSizeInBits() *
509 llvm::RISCV::RVVBitsPerBlock,
511 VLSType = llvm::TargetExtType::get(
512 getVMContext(),
"riscv.vector.tuple",
513 llvm::ScalableVectorType::get(llvm::Type::getInt8Ty(getVMContext()),
// Coerces a fixed-length vector (RVV fixed-length data/mask kinds, or a
// generic vector under a VLS calling convention) into a scalable vector
// ABIArgInfo. Mask kinds keep an i1 element type; data/generic kinds use the
// converted element type. NOTE(review): the per-case NumElts adjustments,
// the ABIVLen==0 branch body, and the final return are on lines missing
// from this extraction.
521ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty,
unsigned ABIVLen)
const {
524 const auto *VT = Ty->
castAs<VectorType>();
525 assert(VT->getElementType()->isBuiltinType() &&
"expected builtin type!");
527 auto VScale = getContext().getTargetInfo().getVScaleRange(
528 getContext().getLangOpts(), TargetInfo::ArmStreamingKind::NotStreaming);
530 unsigned NumElts = VT->getNumElements();
// Masks default to i1 elements; the per-kind scaling is on missing lines.
531 llvm::Type *EltType = llvm::Type::getInt1Ty(getVMContext());
532 switch (VT->getVectorKind()) {
533 case VectorKind::RVVFixedLengthMask_1:
535 case VectorKind::RVVFixedLengthMask_2:
538 case VectorKind::RVVFixedLengthMask_4:
541 case VectorKind::RVVFixedLengthMask:
545 assert((VT->getVectorKind() == VectorKind::Generic ||
546 VT->getVectorKind() == VectorKind::RVVFixedLengthData) &&
547 "Unexpected vector kind");
548 EltType = CGT.ConvertType(VT->getElementType());
551 llvm::ScalableVectorType *ResType;
// Non-VLS path: size the scalable type from the target's vscale range.
558 ResType = llvm::ScalableVectorType::get(EltType, NumElts / VScale->first);
// VLS path: anything needing more than LMUL 8 is passed indirectly.
561 if ((EltType->getScalarSizeInBits() * NumElts / ABIVLen) > 8)
562 return getNaturalAlignIndirect(
563 Ty, getDataLayout().getAllocaAddrSpace(),
568 ResType = llvm::ScalableVectorType::get(
570 llvm::divideCeil(NumElts * llvm::RISCV::RVVBitsPerBlock, ABIVLen));
// Element types the target's extensions cannot hold in vector registers are
// passed as a scalable i8 vector instead.
574 const TargetInfo &TI = getContext().getTargetInfo();
575 if ((EltType->isHalfTy() && !TI.
hasFeature(
"zvfhmin")) ||
576 (EltType->isBFloatTy() && !TI.
hasFeature(
"zvfbfmin")) ||
577 (EltType->isFloatTy() && !TI.
hasFeature(
"zve32f")) ||
578 (EltType->isDoubleTy() && !TI.
hasFeature(
"zve64d")) ||
579 (EltType->isIntegerTy(64) && !TI.
hasFeature(
"zve64x")) ||
580 EltType->isIntegerTy(128)) {
582 ResType = llvm::ScalableVectorType::get(
583 llvm::Type::getInt8Ty(getVMContext()),
584 llvm::divideCeil(EltType->getScalarSizeInBits() * NumElts *
585 llvm::RISCV::RVVBitsPerBlock,
// Classifies one argument: tries (in the order visible here) FPR passing for
// small scalars/complex, the two-field FPCC flattening, the VLS struct path,
// GPR accounting with even-odd alignment for 2*XLen-aligned varargs, integer
// extension for sub-XLen scalars and _BitInt, RVV fixed-vector coercion, and
// finally direct-in-(1|2)-GPRs or indirect for everything else.
// NOTE(review): numerous guard lines and returns are missing from this
// extraction; comments describe only what is visible.
593ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty,
bool IsFixed,
596 unsigned ABIVLen)
const {
597 assert(ArgGPRsLeft <= NumArgGPRs &&
"Arg GPR tracking underflow");
605 return getNaturalAlignIndirect(
606 Ty, getDataLayout().getAllocaAddrSpace(),
// Small FP scalar in an FPR (the enclosing condition is partly missing).
619 FLen >= Size && ArgFPRsLeft) {
// A fixed _Complex whose element fits in an FPR takes two FPRs.
626 if (IsFixed && Ty->
isComplexType() && FLen && ArgFPRsLeft >= 2) {
627 QualType EltTy = Ty->
castAs<ComplexType>()->getElementType();
628 if (getContext().getTypeSize(EltTy) <= FLen) {
635 llvm::Type *Field1Ty =
nullptr;
636 llvm::Type *Field2Ty =
nullptr;
639 int NeededArgGPRs = 0;
640 int NeededArgFPRs = 0;
642 detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
643 NeededArgGPRs, NeededArgFPRs);
// Use the flattened FPCC form only when enough registers remain.
644 if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
645 NeededArgFPRs <= ArgFPRsLeft) {
646 ArgGPRsLeft -= NeededArgGPRs;
647 ArgFPRsLeft -= NeededArgFPRs;
648 return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
654 llvm::Type *VLSType =
nullptr;
655 if (detectVLSCCEligibleStruct(Ty, ABIVLen, VLSType))
659 uint64_t NeededAlign = getContext().getTypeAlign(Ty);
666 int NeededArgGPRs = 1;
// Variadic args with 2*XLen alignment start at an even GPR (except ILP32E).
667 if (!IsFixed && NeededAlign == 2 * XLen)
668 NeededArgGPRs = 2 + (EABI && XLen == 32 ? 0 : (ArgGPRsLeft % 2));
669 else if (Size > XLen && Size <= 2 * XLen)
672 if (NeededArgGPRs > ArgGPRsLeft) {
673 NeededArgGPRs = ArgGPRsLeft;
676 ArgGPRsLeft -= NeededArgGPRs;
// Enums classify as their underlying integer type.
681 Ty = ED->getIntegerType();
684 if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
685 return extendType(Ty, CGT.ConvertType(Ty));
// _BitInt: extend below XLen; beyond 128 bits (or beyond 64 without native
// i128 support) pass indirectly.
688 if (
const auto *EIT = Ty->
getAs<BitIntType>()) {
689 if (EIT->getNumBits() < XLen)
690 return extendType(Ty, CGT.ConvertType(Ty));
691 if (EIT->getNumBits() > 128 ||
692 (!getContext().getTargetInfo().hasInt128Type() &&
693 EIT->getNumBits() > 64))
694 return getNaturalAlignIndirect(
695 Ty, getDataLayout().getAllocaAddrSpace(),
// RVV fixed-length vector kinds always go through coerceVLSVector; generic
// vectors do so only under a VLS calling convention (ABIVLen != 0).
705 if (
const VectorType *VT = Ty->
getAs<VectorType>();
706 VT && !VT->getElementType()->isBitIntType()) {
707 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
708 VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
709 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
710 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
711 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
712 return coerceVLSVector(Ty);
713 if (VT->getVectorKind() == VectorKind::Generic && ABIVLen != 0)
716 return coerceVLSVector(Ty, ABIVLen);
// Aggregates up to 2*XLen pass directly in one or two XLen registers (one
// 2*XLen integer when so aligned).
721 if (Size <= 2 * XLen) {
722 unsigned Alignment = getContext().getTypeAlign(Ty);
728 llvm::IntegerType::get(getVMContext(), XLen));
729 }
else if (Alignment == 2 * XLen) {
731 llvm::IntegerType::get(getVMContext(), 2 * XLen));
734 llvm::IntegerType::get(getVMContext(), XLen), 2));
737 return getNaturalAlignIndirect(
738 Ty, getDataLayout().getAllocaAddrSpace(),
// Classifies the return type; visibly reuses the argument classifier's
// bookkeeping with at most two return FPRs when hardware FP is available
// (the GPR counterpart and the delegating call are on missing lines).
742ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy,
743 unsigned ABIVLen)
const {
748 int ArgFPRsLeft = FLen ? 2 : 0;
// va_arg lowering: values wider than two slots are passed indirectly; an
// ILP32E (EABI && XLen == 32) special case is visible but its body is on a
// missing line. Presumably delegates to emitVoidPtrVAArg — confirm against
// the original source.
756RValue RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
757 QualType Ty, AggValueSlot Slot)
const {
764 auto TInfo = getContext().getTypeInfoInChars(Ty);
770 if (EABI && XLen == 32)
774 bool IsIndirect = TInfo.Width > 2 * SlotSize;
// Builds an extend ABIArgInfo for a sub-XLen integer; the sign/zero choice
// is made on lines missing from this extraction.
780ABIArgInfo RISCVABIInfo::extendType(QualType Ty, llvm::Type *CoerceTy)
const {
781 int TySize = getContext().getTypeSize(Ty);
// Loads a coerced value when the destination ABI type is a scalable vector
// or a riscv.vector.tuple: for tuples, each fixed-vector member of the
// source struct is loaded, widened into a scalable vector via
// insertvector, and inserted into the tuple with the riscv_tuple_insert
// intrinsic; for plain scalable vectors, the fixed source is loaded and
// widened once. NOTE(review): the loads themselves, loop/brace closers, and
// the final return are on lines missing from this extraction.
788llvm::Value *RISCVABIInfo::createCoercedLoad(Address Src,
const ABIArgInfo &AI,
789 CodeGenFunction &CGF)
const {
793 assert((Ty->isScalableTy() || Ty->isTargetExtTy()) &&
794 "Only scalable vector type and vector tuple type are allowed for load "
796 if (llvm::TargetExtType *TupTy = dyn_cast<llvm::TargetExtType>(Ty)) {
812 assert(TupTy->getName() ==
"riscv.vector.tuple");
813 llvm::Type *EltTy = TupTy->getTypeParameter(0);
814 unsigned NumElts = TupTy->getIntParameter(0);
816 if (
auto *ArrayTy = dyn_cast<llvm::ArrayType>(SrcSTy->getElementType(0)))
// Assemble the tuple one field at a time starting from poison.
820 llvm::Value *TupleVal = llvm::PoisonValue::get(Ty);
822 for (
unsigned i = 0; i < NumElts; ++i) {
824 llvm::Value *ExtractFromLoad = CGF.
Builder.CreateExtractValue(Load, i);
// Widen the fixed vector into the scalable element type at index 0.
830 llvm::Value *VectorVal = llvm::PoisonValue::get(EltTy);
832 VectorVal = CGF.
Builder.CreateInsertVector(
833 EltTy, VectorVal, ExtractFromLoad,
uint64_t(0),
"cast.scalable");
835 llvm::Value *Idx = CGF.
Builder.getInt32(i);
837 CGF.
Builder.CreateIntrinsic(llvm::Intrinsic::riscv_tuple_insert,
838 {Ty, EltTy}, {TupleVal, VectorVal, Idx});
// Non-tuple path: unwrap a single-element struct/array source, then widen
// the fixed load into the scalable destination type.
858 SrcTy = SrcSTy->getElementType(0);
859 if (
auto *ArrayTy = dyn_cast<llvm::ArrayType>(SrcTy))
860 SrcTy = ArrayTy->getElementType();
863 assert(ScalableDstTy->getElementType() == FixedSrcTy->getElementType());
865 auto *VectorVal = llvm::PoisonValue::get(ScalableDstTy);
867 ScalableDstTy, VectorVal, Load,
uint64_t(0),
"cast.scalable");
// Mirror of createCoercedLoad for stores: a riscv.vector.tuple source is
// decomposed with the riscv_tuple_extract intrinsic, each scalable element
// is narrowed back to the destination's fixed vector type via
// extractvector, and stored field-by-field; a plain scalable source is
// narrowed once (unwrapping a 1-element array member). NOTE(review): the
// store instructions and closing braces are on lines missing from this
// extraction.
871void RISCVABIInfo::createCoercedStore(llvm::Value *Val, Address Dst,
872 const ABIArgInfo &AI,
bool DestIsVolatile,
873 CodeGenFunction &CGF)
const {
874 llvm::Type *SrcTy = Val->getType();
876 assert((SrcTy->isScalableTy() || SrcTy->isTargetExtTy()) &&
877 "Only scalable vector type and vector tuple type are allowed for "
879 if (llvm::TargetExtType *TupTy = dyn_cast<llvm::TargetExtType>(SrcTy)) {
895 assert(TupTy->getName() ==
"riscv.vector.tuple");
896 llvm::Type *EltTy = TupTy->getTypeParameter(0);
897 unsigned NumElts = TupTy->getIntParameter(0);
899 llvm::Type *FixedVecTy = DstSTy->getElementType(0);
// Destination member may be an array of fixed vectors; unwrap it.
900 if (
auto *ArrayTy = dyn_cast<llvm::ArrayType>(DstSTy->getElementType(0))) {
902 FixedVecTy = ArrayTy->getArrayElementType();
906 for (
unsigned i = 0; i < NumElts; ++i) {
913 llvm::Value *Idx = CGF.
Builder.getInt32(i);
914 auto *TupleElement = CGF.
Builder.CreateIntrinsic(
915 llvm::Intrinsic::riscv_tuple_extract, {EltTy, TupTy}, {Val, Idx});
// Narrow the scalable element back to the fixed destination width.
918 auto *ExtractVec = CGF.
Builder.CreateExtractVector(
919 FixedVecTy, TupleElement,
uint64_t(0));
// Non-tuple path: unwrap a single-element array member, then narrow.
946 llvm::Type *EltTy = DstSTy->getElementType(0);
947 if (
auto *ArrayTy = dyn_cast<llvm::ArrayType>(EltTy)) {
948 assert(ArrayTy->getNumElements() == 1);
949 EltTy = ArrayTy->getElementType();
951 auto *Coerced = CGF.
Builder.CreateExtractVector(
// Target info wrapper: owns the RISCVABIInfo and applies RISC-V function
// attributes ("hw-shadow-stack", "interrupt") from declarations.
// NOTE(review): several guard lines, switch breaks, and closing braces are
// missing from this extraction.
958class RISCVTargetCodeGenInfo :
public TargetCodeGenInfo {
960 RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
unsigned XLen,
961 unsigned FLen,
bool EABI)
963 std::make_unique<RISCVABIInfo>(CGT, XLen, FLen, EABI)) {
965 std::make_unique<SwiftABIInfo>(CGT,
false);
968 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
969 CodeGen::CodeGenModule &CGM)
const override {
// Only function declarations carry these attributes.
970 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
976 Fn->addFnAttr(
"hw-shadow-stack");
978 const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
// Map the interrupt attribute's enumerators onto the backend's "interrupt"
// attribute string; the SiFive CLIC flags combine into composite kinds.
982 StringRef
Kind =
"machine";
983 bool HasSiFiveCLICPreemptible =
false;
984 bool HasSiFiveCLICStackSwap =
false;
985 for (RISCVInterruptAttr::InterruptType
type : Attr->interrupt()) {
987 case RISCVInterruptAttr::machine:
991 case RISCVInterruptAttr::supervisor:
994 case RISCVInterruptAttr::rnmi:
997 case RISCVInterruptAttr::qcinest:
1000 case RISCVInterruptAttr::qcinonest:
1001 Kind =
"qci-nonest";
1005 case RISCVInterruptAttr::SiFiveCLICPreemptible: {
1006 HasSiFiveCLICPreemptible =
true;
1007 Kind = HasSiFiveCLICStackSwap ?
"SiFive-CLIC-preemptible-stack-swap"
1008 :
"SiFive-CLIC-preemptible";
1011 case RISCVInterruptAttr::SiFiveCLICStackSwap: {
1012 HasSiFiveCLICStackSwap =
true;
1013 Kind = HasSiFiveCLICPreemptible ?
"SiFive-CLIC-preemptible-stack-swap"
1014 :
"SiFive-CLIC-stack-swap";
1020 Fn->addFnAttr(
"interrupt", Kind);
// Public factory used by CodeGenModule to construct the RISC-V target info.
// NOTE(review): the declarator line (function name and leading parameters)
// and the trailing arguments/brace are missing from this extraction.
1025std::unique_ptr<TargetCodeGenInfo>
1027 unsigned FLen,
bool EABI) {
1028 return std::make_unique<RISCVTargetCodeGenInfo>(CGM.
getTypes(), XLen, FLen,
#define CC_VLS_CASE(ABI_VLEN)
static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type)
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Attr - This represents one attribute.
bool isZero() const
isZero - Test whether the quantity equals zero.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
static ABIArgInfo getIgnore()
static ABIArgInfo getTargetSpecific(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
static ABIArgInfo getCoerceAndExpand(llvm::StructType *coerceToType, llvm::Type *unpaddedCoerceToType)
llvm::Type * getCoerceToType() const
static ABIArgInfo getSignExtend(QualType Ty, llvm::Type *T=nullptr)
virtual void appendAttributeMangling(TargetAttr *Attr, raw_ostream &Out) const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
RecordArgABI
Specify how one should pass an argument of a record type.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
FunctionType::ExtInfo getExtInfo() const
ABIArgInfo & getReturnInfo()
CanQualType getReturnType() const
MutableArrayRef< ArgInfo > arguments()
unsigned getNumRequiredArgs() const
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
This class organizes the cross-function state that is used while generating LLVM code.
CodeGenTypes & getTypes()
const CodeGenOptions & getCodeGenOpts() const
DefaultABIInfo - The default implementation for ABI specific details.
CallingConv getCC() const
field_range fields() const
virtual bool hasFeature(StringRef Feature) const
Determine whether the given target has the given feature.
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
const T * castAs() const
Member-template castAs<specific type>.
bool isScalarType() const
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
EnumDecl * getAsEnumDecl() const
Retrieves the EnumDecl this type refers to.
bool isStructureOrClassType() const
bool isVectorType() const
bool isRealFloatingType() const
Floating point categories.
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
bool isFloatingType() const
const T * getAs() const
Member-template getAs<specific type>'.
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
RValue emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, AggValueSlot Slot, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
bool isAggregateTypeForABI(QualType T)
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen, unsigned FLen, bool EABI)
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Load(InterpState &S, CodePtr OpPC)
bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
@ Result
The result type of a method or function.
U cast(CodeGen::Address addr)