35#include "llvm/ADT/STLExtras.h"
36#include "llvm/ADT/StringExtras.h"
37#include "llvm/Analysis/ValueTracking.h"
38#include "llvm/IR/Assumptions.h"
39#include "llvm/IR/AttributeMask.h"
40#include "llvm/IR/Attributes.h"
41#include "llvm/IR/CallingConv.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DebugInfoMetadata.h"
44#include "llvm/IR/InlineAsm.h"
45#include "llvm/IR/IntrinsicInst.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/Type.h"
48#include "llvm/Transforms/Utils/Local.h"
58 return llvm::CallingConv::C;
60 return llvm::CallingConv::X86_StdCall;
62 return llvm::CallingConv::X86_FastCall;
64 return llvm::CallingConv::X86_RegCall;
66 return llvm::CallingConv::X86_ThisCall;
68 return llvm::CallingConv::Win64;
70 return llvm::CallingConv::X86_64_SysV;
72 return llvm::CallingConv::ARM_AAPCS;
74 return llvm::CallingConv::ARM_AAPCS_VFP;
76 return llvm::CallingConv::Intel_OCL_BI;
79 return llvm::CallingConv::C;
82 return llvm::CallingConv::X86_VectorCall;
84 return llvm::CallingConv::AArch64_VectorCall;
86 return llvm::CallingConv::AArch64_SVE_VectorCall;
88 return llvm::CallingConv::SPIR_FUNC;
90 return CGM.getTargetCodeGenInfo().getDeviceKernelCallingConv();
92 return llvm::CallingConv::PreserveMost;
94 return llvm::CallingConv::PreserveAll;
96 return llvm::CallingConv::Swift;
98 return llvm::CallingConv::SwiftTail;
100 return llvm::CallingConv::M68k_RTD;
102 return llvm::CallingConv::PreserveNone;
106#define CC_VLS_CASE(ABI_VLEN) \
107 case CC_RISCVVLSCall_##ABI_VLEN: \
108 return llvm::CallingConv::RISCV_VLSCall_##ABI_VLEN;
133 RecTy = Context.getCanonicalTagType(RD);
135 RecTy = Context.VoidTy;
140 return Context.getPointerType(RecTy);
173 assert(paramInfos.size() <= prefixArgs);
174 assert(proto->
getNumParams() + prefixArgs <= totalArgs);
176 paramInfos.reserve(totalArgs);
179 paramInfos.resize(prefixArgs);
183 paramInfos.push_back(ParamInfo);
185 if (ParamInfo.hasPassObjectSize())
186 paramInfos.emplace_back();
189 assert(paramInfos.size() <= totalArgs &&
190 "Did we forget to insert pass_object_size args?");
192 paramInfos.resize(totalArgs);
202 if (!FPT->hasExtParameterInfos()) {
203 assert(paramInfos.empty() &&
204 "We have paramInfos, but the prototype doesn't?");
205 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
209 unsigned PrefixSize = prefix.size();
213 prefix.reserve(prefix.size() + FPT->getNumParams());
215 auto ExtInfos = FPT->getExtParameterInfos();
216 assert(ExtInfos.size() == FPT->getNumParams());
217 for (
unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
218 prefix.push_back(FPT->getParamType(I));
219 if (ExtInfos[I].hasPassObjectSize())
244 FTP->getExtInfo(), paramInfos,
Required);
254 return ::arrangeLLVMFunctionInfo(*
this,
false, argTypes,
259 bool IsTargetDefaultMSABI) {
264 if (D->
hasAttr<FastCallAttr>())
270 if (D->
hasAttr<ThisCallAttr>())
273 if (D->
hasAttr<VectorCallAttr>())
279 if (PcsAttr *PCS = D->
getAttr<PcsAttr>())
282 if (D->
hasAttr<AArch64VectorPcsAttr>())
285 if (D->
hasAttr<AArch64SVEPcsAttr>())
288 if (D->
hasAttr<DeviceKernelAttr>())
291 if (D->
hasAttr<IntelOclBiccAttr>())
300 if (D->
hasAttr<PreserveMostAttr>())
303 if (D->
hasAttr<PreserveAllAttr>())
309 if (D->
hasAttr<PreserveNoneAttr>())
312 if (D->
hasAttr<RISCVVectorCCAttr>())
315 if (RISCVVLSCCAttr *PCS = D->
getAttr<RISCVVLSCCAttr>()) {
316 switch (PCS->getVectorWidth()) {
318 llvm_unreachable(
"Invalid RISC-V VLS ABI VLEN");
319#define CC_VLS_CASE(ABI_VLEN) \
321 return CC_RISCVVLSCall_##ABI_VLEN;
356 return ::arrangeLLVMFunctionInfo(
357 *
this,
true, argTypes,
364 if (FD->
hasAttr<CUDAGlobalAttr>()) {
400 !Target.getCXXABI().hasConstructorVariants();
413 bool PassParams =
true;
415 if (
auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
418 if (
auto Inherited = CD->getInheritedConstructor())
430 if (!paramInfos.empty()) {
433 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.
Prefix,
436 paramInfos.append(AddedArgs.
Suffix,
441 (PassParams && MD->isVariadic() ?
RequiredArgs(argTypes.size())
447 ? CGM.getContext().VoidPtrTy
450 argTypes, extInfo, paramInfos, required);
456 for (
auto &arg : args)
464 for (
auto &arg : args)
471 unsigned totalArgs) {
489 unsigned ExtraPrefixArgs,
unsigned ExtraSuffixArgs,
bool PassProtoArgs) {
491 for (
const auto &Arg : args)
492 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
495 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
500 FPT, TotalPrefixArgs + ExtraSuffixArgs)
506 ? CGM.getContext().VoidPtrTy
513 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
520 ArgTypes, Info, ParamInfos,
Required);
529 if (MD->isImplicitObjectMemberFunction())
537 if (DeviceKernelAttr::isOpenCLSpelling(FD->
getAttr<DeviceKernelAttr>()) &&
540 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
548 {}, noProto->getExtInfo(), {},
575 argTys.push_back(Context.getCanonicalParamType(receiverType));
577 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
579 argTys.push_back(Context.getCanonicalParamType(I->getType()));
581 I->hasAttr<NoEscapeAttr>());
582 extParamInfos.push_back(extParamInfo);
586 bool IsTargetDefaultMSABI =
592 if (
getContext().getLangOpts().ObjCAutoRefCount &&
593 MD->
hasAttr<NSReturnsRetainedAttr>())
630 assert(MD->
isVirtual() &&
"only methods have thunks");
647 ArgTys.push_back(*FTP->param_type_begin());
649 ArgTys.push_back(Context.IntTy);
650 CallingConv CC = Context.getDefaultCallingConvention(
662 unsigned numExtraRequiredArgs,
bool chainCall) {
663 assert(args.size() >= numExtraRequiredArgs);
673 if (proto->isVariadic())
676 if (proto->hasExtParameterInfos())
690 for (
const auto &arg : args)
695 paramInfos, required);
705 chainCall ? 1 : 0, chainCall);
734 for (
const auto &Arg : args)
735 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
775 assert(numPrefixArgs + 1 <= args.size() &&
776 "Emitting a call with less args than the required prefix?");
787 paramInfos, required);
798 assert(signature.
arg_size() <= args.size());
799 if (signature.
arg_size() == args.size())
804 if (!sigParamInfos.empty()) {
805 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
806 paramInfos.resize(args.size());
838 assert(llvm::all_of(argTypes,
839 [](
CanQualType T) {
return T.isCanonicalAsParam(); }));
842 llvm::FoldingSetNodeID ID;
847 bool isDelegateCall =
850 info, paramInfos, required, resultType, argTypes);
852 void *insertPos =
nullptr;
853 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
861 info, paramInfos, resultType, argTypes, required);
862 FunctionInfos.InsertNode(FI, insertPos);
864 bool inserted = FunctionsBeingProcessed.insert(FI).second;
866 assert(inserted &&
"Recursively being processed?");
869 if (CC == llvm::CallingConv::SPIR_KERNEL) {
876 CGM.getABIInfo().computeInfo(*FI);
887 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() ==
nullptr)
890 bool erased = FunctionsBeingProcessed.erase(FI);
892 assert(erased &&
"Not in set?");
898 bool chainCall,
bool delegateCall,
904 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
908 void *buffer =
operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
909 argTypes.size() + 1, paramInfos.size()));
911 CGFunctionInfo *FI =
new (buffer) CGFunctionInfo();
912 FI->CallingConvention = llvmCC;
913 FI->EffectiveCallingConvention = llvmCC;
914 FI->ASTCallingConvention = info.
getCC();
915 FI->InstanceMethod = instanceMethod;
916 FI->ChainCall = chainCall;
917 FI->DelegateCall = delegateCall;
923 FI->Required = required;
926 FI->ArgStruct =
nullptr;
927 FI->ArgStructAlign = 0;
928 FI->NumArgs = argTypes.size();
929 FI->HasExtParameterInfos = !paramInfos.empty();
930 FI->getArgsBuffer()[0].
type = resultType;
931 FI->MaxVectorWidth = 0;
932 for (
unsigned i = 0, e = argTypes.size(); i != e; ++i)
933 FI->getArgsBuffer()[i + 1].
type = argTypes[i];
934 for (
unsigned i = 0, e = paramInfos.size(); i != e; ++i)
935 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
945struct TypeExpansion {
946 enum TypeExpansionKind {
958 const TypeExpansionKind Kind;
960 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
961 virtual ~TypeExpansion() {}
964struct ConstantArrayExpansion : TypeExpansion {
968 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
969 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
970 static bool classof(
const TypeExpansion *TE) {
971 return TE->Kind == TEK_ConstantArray;
975struct RecordExpansion : TypeExpansion {
976 SmallVector<const CXXBaseSpecifier *, 1> Bases;
978 SmallVector<const FieldDecl *, 1> Fields;
980 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
981 SmallVector<const FieldDecl *, 1> &&Fields)
982 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
983 Fields(std::move(Fields)) {}
984 static bool classof(
const TypeExpansion *TE) {
985 return TE->Kind == TEK_Record;
989struct ComplexExpansion : TypeExpansion {
992 ComplexExpansion(QualType EltTy) : TypeExpansion(
TEK_Complex), EltTy(EltTy) {}
993 static bool classof(
const TypeExpansion *TE) {
998struct NoExpansion : TypeExpansion {
999 NoExpansion() : TypeExpansion(TEK_None) {}
1000 static bool classof(
const TypeExpansion *TE) {
return TE->Kind == TEK_None; }
1004static std::unique_ptr<TypeExpansion>
1007 return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
1013 assert(!RD->hasFlexibleArrayMember() &&
1014 "Cannot expand structure with flexible array.");
1015 if (RD->isUnion()) {
1021 for (
const auto *FD : RD->fields()) {
1022 if (FD->isZeroLengthBitField())
1024 assert(!FD->isBitField() &&
1025 "Cannot expand structure with bit-field members.");
1026 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
1027 if (UnionSize < FieldSize) {
1028 UnionSize = FieldSize;
1033 Fields.push_back(LargestFD);
1035 if (
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1036 assert(!CXXRD->isDynamicClass() &&
1037 "cannot expand vtable pointers in dynamic classes");
1038 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
1041 for (
const auto *FD : RD->fields()) {
1042 if (FD->isZeroLengthBitField())
1044 assert(!FD->isBitField() &&
1045 "Cannot expand structure with bit-field members.");
1046 Fields.push_back(FD);
1049 return std::make_unique<RecordExpansion>(std::move(Bases),
1053 return std::make_unique<ComplexExpansion>(CT->getElementType());
1055 return std::make_unique<NoExpansion>();
1060 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1063 if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1065 for (
auto BS : RExp->Bases)
1067 for (
auto FD : RExp->Fields)
1080 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1081 for (
int i = 0, n = CAExp->NumElts; i < n; i++) {
1084 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1085 for (
auto BS : RExp->Bases)
1087 for (
auto FD : RExp->Fields)
1089 }
else if (
auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1100 ConstantArrayExpansion *CAE,
1102 llvm::function_ref<
void(
Address)> Fn) {
1103 for (
int i = 0, n = CAE->NumElts; i < n; i++) {
1109void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1110 llvm::Function::arg_iterator &AI) {
1111 assert(LV.isSimple() &&
1112 "Unexpected non-simple lvalue during struct expansion.");
1115 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1117 *
this, CAExp, LV.getAddress(), [&](Address EltAddr) {
1118 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1119 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1121 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1122 Address
This = LV.getAddress();
1123 for (
const CXXBaseSpecifier *BS : RExp->Bases) {
1127 false, SourceLocation());
1128 LValue SubLV = MakeAddrLValue(Base, BS->
getType());
1131 ExpandTypeFromArgs(BS->
getType(), SubLV, AI);
1133 for (
auto FD : RExp->Fields) {
1135 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1136 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1139 auto realValue = &*AI++;
1140 auto imagValue = &*AI++;
1141 EmitStoreOfComplex(
ComplexPairTy(realValue, imagValue), LV,
true);
1146 llvm::Value *Arg = &*AI++;
1147 if (LV.isBitField()) {
1153 if (Arg->getType()->isPointerTy()) {
1154 Address
Addr = LV.getAddress();
1155 Arg = Builder.CreateBitCast(Arg,
Addr.getElementType());
1157 EmitStoreOfScalar(Arg, LV);
1162void CodeGenFunction::ExpandTypeToArgs(
1163 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1164 SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos) {
1166 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1171 CallArg(convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1173 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1176 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1179 for (
const CXXBaseSpecifier *BS : RExp->Bases) {
1183 false, SourceLocation());
1187 ExpandTypeToArgs(BS->
getType(), BaseArg, IRFuncTy, IRCallArgs,
1191 LValue LV = MakeAddrLValue(This, Ty);
1192 for (
auto FD : RExp->Fields) {
1194 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1195 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1200 IRCallArgs[IRCallArgPos++] = CV.first;
1201 IRCallArgs[IRCallArgPos++] = CV.second;
1205 assert(RV.isScalar() &&
1206 "Unexpected non-scalar rvalue during struct expansion.");
1209 llvm::Value *
V = RV.getScalarVal();
1210 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1211 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1212 V = Builder.CreateBitCast(
V, IRFuncTy->getParamType(IRCallArgPos));
1214 IRCallArgs[IRCallArgPos++] =
V;
1222 const Twine &Name =
"tmp") {
1235 llvm::StructType *SrcSTy,
1239 if (SrcSTy->getNumElements() == 0)
1248 uint64_t FirstEltSize = CGF.
CGM.
getDataLayout().getTypeStoreSize(FirstElt);
1249 if (FirstEltSize < DstSize &&
1258 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1273 if (Val->getType() == Ty)
1279 return CGF.
Builder.CreateBitCast(Val, Ty,
"coerce.val");
1285 llvm::Type *DestIntTy = Ty;
1289 if (Val->getType() != DestIntTy) {
1291 if (DL.isBigEndian()) {
1294 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1295 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1297 if (SrcSize > DstSize) {
1298 Val = CGF.
Builder.CreateLShr(Val, SrcSize - DstSize,
"coerce.highbits");
1299 Val = CGF.
Builder.CreateTrunc(Val, DestIntTy,
"coerce.val.ii");
1301 Val = CGF.
Builder.CreateZExt(Val, DestIntTy,
"coerce.val.ii");
1302 Val = CGF.
Builder.CreateShl(Val, DstSize - SrcSize,
"coerce.highbits");
1306 Val = CGF.
Builder.CreateIntCast(Val, DestIntTy,
false,
"coerce.val.ii");
1311 Val = CGF.
Builder.CreateIntToPtr(Val, Ty,
"coerce.val.ip");
1318 if (PFPFields.empty())
1321 auto LoadCoercedField = [&](
CharUnits Offset,
1322 llvm::Type *FieldType) -> llvm::Value * {
1327 if (!PFPFields.empty() && PFPFields[0].Offset == Offset) {
1331 FieldVal = CGF.
Builder.CreatePtrToInt(FieldVal, FieldType);
1332 PFPFields.erase(PFPFields.begin());
1349 Val = CGF.
Builder.CreatePtrToInt(Val, Ty);
1353 auto *ET = AT->getElementType();
1357 llvm::Value *Val = llvm::PoisonValue::get(AT);
1358 for (
unsigned Idx = 0; Idx != AT->getNumElements(); ++Idx, Offset += WordSize)
1359 Val = CGF.
Builder.CreateInsertValue(Val, LoadCoercedField(Offset, ET), Idx);
1383 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1385 DstSize.getFixedValue(), CGF);
1400 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1401 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1415 if (
auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1416 if (
auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1419 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
1420 FixedSrcTy->getElementType()->isIntegerTy(8)) {
1421 ScalableDstTy = llvm::ScalableVectorType::get(
1422 FixedSrcTy->getElementType(),
1424 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
1426 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
1428 auto *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
1429 llvm::Value *Result = CGF.
Builder.CreateInsertVector(
1430 ScalableDstTy, PoisonVec, Load, uint64_t(0),
"cast.scalable");
1432 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, Ty));
1433 if (Result->getType() != ScalableDstTy)
1434 Result = CGF.
Builder.CreateBitCast(Result, ScalableDstTy);
1435 if (Result->getType() != Ty)
1436 Result = CGF.
Builder.CreateExtractVector(Ty, Result, uint64_t(0));
1448 llvm::ConstantInt::get(CGF.
IntPtrTy, SrcSize.getKnownMinValue()));
1455 if (PFPFields.empty())
1458 llvm::Type *SrcTy = Src->getType();
1459 auto StoreCoercedField = [&](
CharUnits Offset, llvm::Value *FieldVal) {
1460 if (!PFPFields.empty() && PFPFields[0].Offset == Offset) {
1465 PFPFields.erase(PFPFields.begin());
1485 auto *ET = AT->getElementType();
1489 for (
unsigned i = 0; i != AT->getNumElements(); ++i, Offset += WordSize)
1490 StoreCoercedField(Offset, CGF.
Builder.CreateExtractValue(Src, i));
1496 Address Dst, llvm::TypeSize DstSize,
1497 bool DstIsVolatile) {
1501 llvm::Type *SrcTy = Src->getType();
1502 llvm::TypeSize SrcSize =
CGM.getDataLayout().getTypeAllocSize(SrcTy);
1508 if (llvm::StructType *DstSTy =
1510 assert(!SrcSize.isScalable());
1512 SrcSize.getFixedValue(), *
this);
1519 if (SrcSize.isScalable() || SrcSize <= DstSize) {
1520 if (SrcTy->isIntegerTy() && Dst.
getElementType()->isPointerTy() &&
1524 auto *I =
Builder.CreateStore(Src, Dst, DstIsVolatile);
1526 }
else if (llvm::StructType *STy =
1527 dyn_cast<llvm::StructType>(Src->getType())) {
1530 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1532 llvm::Value *Elt =
Builder.CreateExtractValue(Src, i);
1533 auto *I =
Builder.CreateStore(Elt, EltPtr, DstIsVolatile);
1541 }
else if (SrcTy->isIntegerTy()) {
1543 llvm::Type *DstIntTy =
Builder.getIntNTy(DstSize.getFixedValue() * 8);
1560 Builder.CreateStore(Src, Tmp);
1561 auto *I =
Builder.CreateMemCpy(
1580static std::pair<llvm::Value *, bool>
1582 llvm::ScalableVectorType *FromTy, llvm::Value *
V,
1583 StringRef Name =
"") {
1586 if (FromTy->getElementType()->isIntegerTy(1) &&
1587 ToTy->getElementType() == CGF.
Builder.getInt8Ty()) {
1588 if (!FromTy->getElementCount().isKnownMultipleOf(8)) {
1589 FromTy = llvm::ScalableVectorType::get(
1590 FromTy->getElementType(),
1591 llvm::alignTo<8>(FromTy->getElementCount().getKnownMinValue()));
1592 llvm::Value *ZeroVec = llvm::Constant::getNullValue(FromTy);
1593 V = CGF.
Builder.CreateInsertVector(FromTy, ZeroVec,
V, uint64_t(0));
1595 FromTy = llvm::ScalableVectorType::get(
1596 ToTy->getElementType(),
1597 FromTy->getElementCount().getKnownMinValue() / 8);
1598 V = CGF.
Builder.CreateBitCast(
V, FromTy);
1600 if (FromTy->getElementType() == ToTy->getElementType()) {
1601 V->setName(Name +
".coerce");
1602 V = CGF.
Builder.CreateExtractVector(ToTy,
V, uint64_t(0),
"cast.fixed");
1612class ClangToLLVMArgMapping {
1613 static const unsigned InvalidIndex = ~0U;
1614 unsigned InallocaArgNo;
1616 unsigned TotalIRArgs;
1620 unsigned PaddingArgIndex;
1623 unsigned FirstArgIndex;
1624 unsigned NumberOfArgs;
1627 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1631 SmallVector<IRArgs, 8> ArgInfo;
1634 ClangToLLVMArgMapping(
const ASTContext &Context,
const CGFunctionInfo &FI,
1635 bool OnlyRequiredArgs =
false)
1636 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1637 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1638 construct(Context, FI, OnlyRequiredArgs);
1641 bool hasInallocaArg()
const {
return InallocaArgNo != InvalidIndex; }
1642 unsigned getInallocaArgNo()
const {
1643 assert(hasInallocaArg());
1644 return InallocaArgNo;
1647 bool hasSRetArg()
const {
return SRetArgNo != InvalidIndex; }
1648 unsigned getSRetArgNo()
const {
1649 assert(hasSRetArg());
1653 unsigned totalIRArgs()
const {
return TotalIRArgs; }
1655 bool hasPaddingArg(
unsigned ArgNo)
const {
1656 assert(ArgNo < ArgInfo.size());
1657 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1659 unsigned getPaddingArgNo(
unsigned ArgNo)
const {
1660 assert(hasPaddingArg(ArgNo));
1661 return ArgInfo[ArgNo].PaddingArgIndex;
1666 std::pair<unsigned, unsigned> getIRArgs(
unsigned ArgNo)
const {
1667 assert(ArgNo < ArgInfo.size());
1668 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1669 ArgInfo[ArgNo].NumberOfArgs);
1673 void construct(
const ASTContext &Context,
const CGFunctionInfo &FI,
1674 bool OnlyRequiredArgs);
1677void ClangToLLVMArgMapping::construct(
const ASTContext &Context,
1678 const CGFunctionInfo &FI,
1679 bool OnlyRequiredArgs) {
1680 unsigned IRArgNo = 0;
1681 bool SwapThisWithSRet =
false;
1686 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1694 QualType ArgType = I->type;
1695 const ABIArgInfo &AI = I->info;
1697 auto &IRArgs = ArgInfo[ArgNo];
1700 IRArgs.PaddingArgIndex = IRArgNo++;
1707 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.
getCoerceToType());
1709 IRArgs.NumberOfArgs = STy->getNumElements();
1711 IRArgs.NumberOfArgs = 1;
1717 IRArgs.NumberOfArgs = 1;
1722 IRArgs.NumberOfArgs = 0;
1732 if (IRArgs.NumberOfArgs > 0) {
1733 IRArgs.FirstArgIndex = IRArgNo;
1734 IRArgNo += IRArgs.NumberOfArgs;
1739 if (IRArgNo == 1 && SwapThisWithSRet)
1742 assert(ArgNo == ArgInfo.size());
1745 InallocaArgNo = IRArgNo++;
1747 TotalIRArgs = IRArgNo;
1755 return RI.
isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1770 switch (BT->getKind()) {
1773 case BuiltinType::Float:
1775 case BuiltinType::Double:
1777 case BuiltinType::LongDouble:
1788 if (BT->getKind() == BuiltinType::LongDouble)
1789 return getTarget().useObjCFP2RetForComplexLongDouble();
1803 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1805 assert(Inserted &&
"Recursively being processed?");
1807 llvm::Type *resultType =
nullptr;
1812 llvm_unreachable(
"Invalid ABI kind for return argument");
1824 unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
1825 resultType = llvm::PointerType::get(
getLLVMContext(), addressSpace);
1841 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI,
true);
1845 if (IRFunctionArgs.hasSRetArg()) {
1846 ArgTypes[IRFunctionArgs.getSRetArgNo()] = llvm::PointerType::get(
1851 if (IRFunctionArgs.hasInallocaArg())
1852 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1859 for (; it != ie; ++it, ++ArgNo) {
1863 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1864 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1867 unsigned FirstIRArg, NumIRArgs;
1868 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1873 assert(NumIRArgs == 0);
1877 assert(NumIRArgs == 1);
1879 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1883 assert(NumIRArgs == 1);
1884 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1893 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1895 assert(NumIRArgs == st->getNumElements());
1896 for (
unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1897 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1899 assert(NumIRArgs == 1);
1900 ArgTypes[FirstIRArg] = argType;
1906 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1908 *ArgTypesIter++ = EltTy;
1910 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1915 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1917 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1922 bool Erased = FunctionsBeingProcessed.erase(&FI);
1924 assert(Erased &&
"Not in set?");
1926 return llvm::FunctionType::get(resultType, ArgTypes, FI.
isVariadic());
1940 llvm::AttrBuilder &FuncAttrs,
1947 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1951 FuncAttrs.addAttribute(
"aarch64_pstate_sm_enabled");
1953 FuncAttrs.addAttribute(
"aarch64_pstate_sm_compatible");
1955 FuncAttrs.addAttribute(
"aarch64_za_state_agnostic");
1959 FuncAttrs.addAttribute(
"aarch64_preserves_za");
1961 FuncAttrs.addAttribute(
"aarch64_in_za");
1963 FuncAttrs.addAttribute(
"aarch64_out_za");
1965 FuncAttrs.addAttribute(
"aarch64_inout_za");
1969 FuncAttrs.addAttribute(
"aarch64_preserves_zt0");
1971 FuncAttrs.addAttribute(
"aarch64_in_zt0");
1973 FuncAttrs.addAttribute(
"aarch64_out_zt0");
1975 FuncAttrs.addAttribute(
"aarch64_inout_zt0");
1979 const Decl *Callee) {
1985 for (
const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
1986 AA->getAssumption().split(Attrs,
",");
1989 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1990 llvm::join(Attrs.begin(), Attrs.end(),
","));
1997 if (
const RecordType *RT =
1999 if (
const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
2000 return ClassDecl->hasTrivialDestructor();
2006 const Decl *TargetDecl) {
2012 if (
Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
2016 if (!
Module.getLangOpts().CPlusPlus)
2019 if (
const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
2020 if (FDecl->isExternC())
2022 }
else if (
const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
2024 if (VDecl->isExternC())
2032 return Module.getCodeGenOpts().StrictReturn ||
2033 !
Module.MayDropFunctionReturn(
Module.getContext(), RetTy) ||
2034 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
2041 llvm::DenormalMode FP32DenormalMode,
2042 llvm::AttrBuilder &FuncAttrs) {
2043 llvm::DenormalFPEnv FPEnv(FPDenormalMode, FP32DenormalMode);
2044 if (FPEnv != llvm::DenormalFPEnv::getDefault())
2045 FuncAttrs.addDenormalFPEnvAttr(FPEnv);
2053 llvm::AttrBuilder &FuncAttrs) {
2059 StringRef Name,
bool HasOptnone,
const CodeGenOptions &CodeGenOpts,
2061 llvm::AttrBuilder &FuncAttrs) {
2064 if (CodeGenOpts.OptimizeSize)
2065 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
2066 if (CodeGenOpts.OptimizeSize == 2)
2067 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
2070 if (CodeGenOpts.DisableRedZone)
2071 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
2072 if (CodeGenOpts.IndirectTlsSegRefs)
2073 FuncAttrs.addAttribute(
"indirect-tls-seg-refs");
2074 if (CodeGenOpts.NoImplicitFloat)
2075 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
2077 if (AttrOnCallSite) {
2082 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
2084 FuncAttrs.addAttribute(
"trap-func-name", CodeGenOpts.
TrapFuncName);
2086 switch (CodeGenOpts.getFramePointer()) {
2094 FuncAttrs.addAttribute(
"frame-pointer",
2096 CodeGenOpts.getFramePointer()));
2099 if (CodeGenOpts.LessPreciseFPMAD)
2100 FuncAttrs.addAttribute(
"less-precise-fpmad",
"true");
2102 if (CodeGenOpts.NullPointerIsValid)
2103 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
2106 FuncAttrs.addAttribute(
"no-trapping-math",
"true");
2110 if (CodeGenOpts.SoftFloat)
2111 FuncAttrs.addAttribute(
"use-soft-float",
"true");
2112 FuncAttrs.addAttribute(
"stack-protector-buffer-size",
2113 llvm::utostr(CodeGenOpts.SSPBufferSize));
2114 if (LangOpts.NoSignedZero)
2115 FuncAttrs.addAttribute(
"no-signed-zeros-fp-math",
"true");
2118 const std::vector<std::string> &Recips = CodeGenOpts.
Reciprocals;
2119 if (!Recips.empty())
2120 FuncAttrs.addAttribute(
"reciprocal-estimates", llvm::join(Recips,
","));
2124 FuncAttrs.addAttribute(
"prefer-vector-width",
2127 if (CodeGenOpts.StackRealignment)
2128 FuncAttrs.addAttribute(
"stackrealign");
2129 if (CodeGenOpts.Backchain)
2130 FuncAttrs.addAttribute(
"backchain");
2131 if (CodeGenOpts.EnableSegmentedStacks)
2132 FuncAttrs.addAttribute(
"split-stack");
2134 if (CodeGenOpts.SpeculativeLoadHardening)
2135 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2138 switch (CodeGenOpts.getZeroCallUsedRegs()) {
2139 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
2140 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2142 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
2143 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr-arg");
2145 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
2146 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr");
2148 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
2149 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-arg");
2151 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
2152 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used");
2154 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
2155 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr-arg");
2157 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
2158 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr");
2160 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
2161 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-arg");
2163 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
2164 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all");
2175 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2180 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
2181 LangOpts.SYCLIsDevice) {
2182 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2185 if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
2186 FuncAttrs.addAttribute(
"save-reg-params");
2189 StringRef Var,
Value;
2191 FuncAttrs.addAttribute(Var,
Value);
2205 const llvm::Function &F,
2207 auto FFeatures = F.getFnAttribute(
"target-features");
2209 llvm::StringSet<> MergedNames;
2211 MergedFeatures.reserve(TargetOpts.
Features.size());
2213 auto AddUnmergedFeatures = [&](
auto &&FeatureRange) {
2214 for (StringRef
Feature : FeatureRange) {
2218 StringRef Name =
Feature.drop_front(1);
2219 bool Merged = !MergedNames.insert(Name).second;
2221 MergedFeatures.push_back(
Feature);
2225 if (FFeatures.isValid())
2226 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(),
','));
2227 AddUnmergedFeatures(TargetOpts.
Features);
2229 if (!MergedFeatures.empty()) {
2230 llvm::sort(MergedFeatures);
2231 FuncAttr.addAttribute(
"target-features", llvm::join(MergedFeatures,
","));
2238 bool WillInternalize) {
2240 llvm::AttrBuilder FuncAttrs(F.getContext());
2243 if (!TargetOpts.
CPU.empty())
2244 FuncAttrs.addAttribute(
"target-cpu", TargetOpts.
CPU);
2245 if (!TargetOpts.
TuneCPU.empty())
2246 FuncAttrs.addAttribute(
"tune-cpu", TargetOpts.
TuneCPU);
2249 CodeGenOpts, LangOpts,
2252 if (!WillInternalize && F.isInterposable()) {
2257 F.addFnAttrs(FuncAttrs);
2261 llvm::AttributeMask AttrsToRemove;
2265 llvm::DenormalFPEnv MergedFPEnv =
2266 OptsFPEnv.mergeCalleeMode(F.getDenormalFPEnv());
2268 if (MergedFPEnv == llvm::DenormalFPEnv::getDefault()) {
2269 AttrsToRemove.addAttribute(llvm::Attribute::DenormalFPEnv);
2272 FuncAttrs.addDenormalFPEnvAttr(MergedFPEnv);
2275 F.removeFnAttrs(AttrsToRemove);
2279 F.addFnAttrs(FuncAttrs);
2282void CodeGenModule::getTrivialDefaultFunctionAttributes(
2283 StringRef Name,
bool HasOptnone,
bool AttrOnCallSite,
2284 llvm::AttrBuilder &FuncAttrs) {
2286 getLangOpts(), AttrOnCallSite,
2290void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2292 bool AttrOnCallSite,
2293 llvm::AttrBuilder &FuncAttrs) {
2297 if (!AttrOnCallSite)
2303 if (!AttrOnCallSite)
2308 llvm::AttrBuilder &attrs) {
2309 getDefaultFunctionAttributes(
"",
false,
2311 GetCPUAndFeaturesAttributes(
GlobalDecl(), attrs);
2316 const NoBuiltinAttr *NBA =
nullptr) {
2317 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2319 AttributeName +=
"no-builtin-";
2320 AttributeName += BuiltinName;
2321 FuncAttrs.addAttribute(AttributeName);
2325 if (LangOpts.NoBuiltin) {
2327 FuncAttrs.addAttribute(
"no-builtins");
2341 if (llvm::is_contained(NBA->builtinNames(),
"*")) {
2342 FuncAttrs.addAttribute(
"no-builtins");
2347 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2351 const llvm::DataLayout &DL,
const ABIArgInfo &AI,
2352 bool CheckCoerce =
true) {
2359 if (!DL.typeSizeEqualsStoreSize(Ty))
2366 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2367 DL.getTypeSizeInBits(Ty)))
2391 if (
const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2393 if (
const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2402 unsigned NumRequiredArgs,
unsigned ArgNo) {
2403 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2408 if (ArgNo >= NumRequiredArgs)
2412 if (ArgNo < FD->getNumParams()) {
2413 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2414 if (Param && Param->hasAttr<MaybeUndefAttr>())
2431 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2434 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2436 llvm::all_of(ST->elements(),
2437 llvm::AttributeFuncs::isNoFPClassCompatibleType);
2445 llvm::FPClassTest Mask = llvm::fcNone;
2446 if (LangOpts.NoHonorInfs)
2447 Mask |= llvm::fcInf;
2448 if (LangOpts.NoHonorNaNs)
2449 Mask |= llvm::fcNan;
2455 llvm::AttributeList &Attrs) {
2456 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2457 Attrs = Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Memory);
2458 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
2484 llvm::AttributeList &AttrList,
2486 bool AttrOnCallSite,
bool IsThunk) {
2494 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2496 FuncAttrs.addAttribute(
"cmse_nonsecure_call");
2507 bool HasOptnone =
false;
2509 const NoBuiltinAttr *NBA =
nullptr;
2513 std::optional<llvm::Attribute::AttrKind> MemAttrForPtrArgs;
2514 bool AddedPotentialArgAccess =
false;
2515 auto AddPotentialArgAccess = [&]() {
2516 AddedPotentialArgAccess =
true;
2517 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2519 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2520 llvm::MemoryEffects::argMemOnly());
2527 if (TargetDecl->
hasAttr<ReturnsTwiceAttr>())
2528 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2529 if (TargetDecl->
hasAttr<NoThrowAttr>())
2530 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2531 if (TargetDecl->
hasAttr<NoReturnAttr>())
2532 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2533 if (TargetDecl->
hasAttr<ColdAttr>())
2534 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2535 if (TargetDecl->
hasAttr<HotAttr>())
2536 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2537 if (TargetDecl->
hasAttr<NoDuplicateAttr>())
2538 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2539 if (TargetDecl->
hasAttr<ConvergentAttr>())
2540 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2542 if (
const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2545 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2547 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2549 (Kind == OO_New || Kind == OO_Array_New))
2550 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2553 const bool IsVirtualCall = MD && MD->
isVirtual();
2556 if (!(AttrOnCallSite && IsVirtualCall)) {
2557 if (Fn->isNoReturn())
2558 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2559 NBA = Fn->getAttr<NoBuiltinAttr>();
2566 if (AttrOnCallSite && TargetDecl->
hasAttr<NoMergeAttr>())
2567 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2571 if (TargetDecl->
hasAttr<ConstAttr>()) {
2572 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2573 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2576 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2577 MemAttrForPtrArgs = llvm::Attribute::ReadNone;
2578 }
else if (TargetDecl->
hasAttr<PureAttr>()) {
2579 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2580 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2582 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2583 MemAttrForPtrArgs = llvm::Attribute::ReadOnly;
2584 }
else if (TargetDecl->
hasAttr<NoAliasAttr>()) {
2585 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2586 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2588 if (
const auto *RA = TargetDecl->
getAttr<RestrictAttr>();
2589 RA && RA->getDeallocator() ==
nullptr)
2590 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2591 if (TargetDecl->
hasAttr<ReturnsNonNullAttr>() &&
2592 !CodeGenOpts.NullPointerIsValid)
2593 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2594 if (TargetDecl->
hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2595 FuncAttrs.addAttribute(
"no_caller_saved_registers");
2596 if (TargetDecl->
hasAttr<AnyX86NoCfCheckAttr>())
2597 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2598 if (TargetDecl->
hasAttr<LeafAttr>())
2599 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2600 if (TargetDecl->
hasAttr<BPFFastCallAttr>())
2601 FuncAttrs.addAttribute(
"bpf_fastcall");
2603 HasOptnone = TargetDecl->
hasAttr<OptimizeNoneAttr>();
2604 if (
auto *AllocSize = TargetDecl->
getAttr<AllocSizeAttr>()) {
2605 std::optional<unsigned> NumElemsParam;
2606 if (AllocSize->getNumElemsParam().isValid())
2607 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2608 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2618 FuncAttrs.addAttribute(
"uniform-work-group-size");
2620 if (TargetDecl->
hasAttr<ArmLocallyStreamingAttr>())
2621 FuncAttrs.addAttribute(
"aarch64_pstate_sm_body");
2623 if (
auto *ModularFormat = TargetDecl->
getAttr<ModularFormatAttr>()) {
2624 FormatAttr *Format = TargetDecl->
getAttr<FormatAttr>();
2625 StringRef
Type = Format->getType()->getName();
2626 std::string FormatIdx = std::to_string(Format->getFormatIdx());
2627 std::string FirstArg = std::to_string(Format->getFirstArg());
2629 Type, FormatIdx, FirstArg,
2630 ModularFormat->getModularImplFn()->getName(),
2631 ModularFormat->getImplName()};
2632 llvm::append_range(Args, ModularFormat->aspects());
2633 FuncAttrs.addAttribute(
"modular-format", llvm::join(Args,
","));
2646 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2651 if (TargetDecl->
hasAttr<NoSpeculativeLoadHardeningAttr>())
2652 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2653 if (TargetDecl->
hasAttr<SpeculativeLoadHardeningAttr>())
2654 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2655 if (TargetDecl->
hasAttr<NoSplitStackAttr>())
2656 FuncAttrs.removeAttribute(
"split-stack");
2657 if (TargetDecl->
hasAttr<ZeroCallUsedRegsAttr>()) {
2660 TargetDecl->
getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2661 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2662 FuncAttrs.addAttribute(
2663 "zero-call-used-regs",
2664 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2671 if (CodeGenOpts.NoPLT) {
2672 if (
auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2673 if (!Fn->isDefined() && !AttrOnCallSite) {
2674 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2679 if (TargetDecl->
hasAttr<NoConvergentAttr>())
2680 FuncAttrs.removeAttribute(llvm::Attribute::Convergent);
2685 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2686 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2687 if (!FD->isExternallyVisible())
2688 FuncAttrs.addAttribute(
"sample-profile-suffix-elision-policy",
2695 if (!AttrOnCallSite) {
2696 if (TargetDecl && TargetDecl->
hasAttr<CmseNSEntryAttr>())
2697 FuncAttrs.addAttribute(
"cmse_nonsecure_entry");
2700 auto shouldDisableTailCalls = [&] {
2702 if (CodeGenOpts.DisableTailCalls)
2708 if (TargetDecl->
hasAttr<DisableTailCallsAttr>() ||
2709 TargetDecl->
hasAttr<AnyX86InterruptAttr>())
2712 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2713 if (
const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2714 if (!BD->doesNotEscape())
2720 if (shouldDisableTailCalls())
2721 FuncAttrs.addAttribute(
"disable-tail-calls",
"true");
2726 static const llvm::StringSet<> ReturnsTwiceFn{
2727 "_setjmpex",
"setjmp",
"_setjmp",
"vfork",
2728 "sigsetjmp",
"__sigsetjmp",
"savectx",
"getcontext"};
2729 if (ReturnsTwiceFn.contains(Name))
2730 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2734 GetCPUAndFeaturesAttributes(CalleeInfo.
getCalleeDecl(), FuncAttrs);
2737 if (!MSHotPatchFunctions.empty()) {
2738 bool IsHotPatched = llvm::binary_search(MSHotPatchFunctions, Name);
2740 FuncAttrs.addAttribute(
"marked_for_windows_hot_patching");
2745 if (CodeGenOpts.isLoaderReplaceableFunctionName(Name))
2746 FuncAttrs.addAttribute(
"loader-replaceable");
2749 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI);
2756 if (CodeGenOpts.EnableNoundefAttrs &&
2760 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2766 RetAttrs.addAttribute(llvm::Attribute::SExt);
2768 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2770 RetAttrs.addAttribute(llvm::Attribute::NoExt);
2775 RetAttrs.addAttribute(llvm::Attribute::InReg);
2787 AddPotentialArgAccess();
2796 llvm_unreachable(
"Invalid ABI kind for return argument");
2804 RetAttrs.addDereferenceableAttr(
2806 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2807 !CodeGenOpts.NullPointerIsValid)
2808 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2810 llvm::Align Alignment =
2812 RetAttrs.addAlignmentAttr(Alignment);
2817 bool hasUsedSRet =
false;
2821 if (IRFunctionArgs.hasSRetArg()) {
2823 SRETAttrs.addStructRetAttr(
getTypes().ConvertTypeForMem(RetTy));
2824 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2825 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2828 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2830 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2835 if (IRFunctionArgs.hasInallocaArg()) {
2838 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2848 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2850 assert(IRArgs.second == 1 &&
"Expected only a single `this` pointer.");
2856 if (!CodeGenOpts.NullPointerIsValid &&
2858 Attrs.addAttribute(llvm::Attribute::NonNull);
2865 Attrs.addDereferenceableOrNullAttr(
2871 llvm::Align Alignment =
2875 Attrs.addAlignmentAttr(Alignment);
2877 const auto *DD = dyn_cast_if_present<CXXDestructorDecl>(
2885 CodeGenOpts.StrictLifetimes) {
2887 dyn_cast<CXXRecordDecl>(DD->getDeclContext());
2894 Attrs.addDeadOnReturnAttr(llvm::DeadOnReturnInfo(
2895 Context.getASTRecordLayout(ClassDecl).getDataSize().getQuantity()));
2898 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(
getLLVMContext(), Attrs);
2903 I != E; ++I, ++ArgNo) {
2909 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2911 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2914 .addAttribute(llvm::Attribute::InReg));
2919 if (CodeGenOpts.EnableNoundefAttrs &&
2921 Attrs.addAttribute(llvm::Attribute::NoUndef);
2930 Attrs.addAttribute(llvm::Attribute::SExt);
2932 Attrs.addAttribute(llvm::Attribute::ZExt);
2934 Attrs.addAttribute(llvm::Attribute::NoExt);
2939 Attrs.addAttribute(llvm::Attribute::Nest);
2941 Attrs.addAttribute(llvm::Attribute::InReg);
2942 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.
getDirectAlign()));
2949 Attrs.addAttribute(llvm::Attribute::InReg);
2961 Attrs.addByValAttr(
getTypes().ConvertTypeForMem(ParamType));
2969 Attrs.addDeadOnReturnAttr(llvm::DeadOnReturnInfo());
2974 if (CodeGenOpts.PassByValueIsNoAlias &&
Decl &&
2975 Decl->getArgPassingRestrictions() ==
2979 Attrs.addAttribute(llvm::Attribute::NoAlias);
3004 AddPotentialArgAccess();
3009 Attrs.addByRefAttr(
getTypes().ConvertTypeForMem(ParamType));
3020 AddPotentialArgAccess();
3028 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
3029 !CodeGenOpts.NullPointerIsValid)
3030 Attrs.addAttribute(llvm::Attribute::NonNull);
3032 llvm::Align Alignment =
3034 Attrs.addAlignmentAttr(Alignment);
3043 DeviceKernelAttr::isOpenCLSpelling(
3044 TargetDecl->
getAttr<DeviceKernelAttr>()) &&
3048 llvm::Align Alignment =
3050 Attrs.addAlignmentAttr(Alignment);
3057 Attrs.addAttribute(llvm::Attribute::NoAlias);
3066 Attrs.addStructRetAttr(
getTypes().ConvertTypeForMem(ParamType));
3071 Attrs.addAttribute(llvm::Attribute::NoAlias);
3075 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
3076 auto info =
getContext().getTypeInfoInChars(PTy);
3077 Attrs.addDereferenceableAttr(info.Width.getQuantity());
3078 Attrs.addAlignmentAttr(info.Align.getAsAlign());
3084 Attrs.addAttribute(llvm::Attribute::SwiftError);
3088 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
3092 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
3097 Attrs.addCapturesAttr(llvm::CaptureInfo::none());
3099 if (Attrs.hasAttributes()) {
3100 unsigned FirstIRArg, NumIRArgs;
3101 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3102 for (
unsigned i = 0; i < NumIRArgs; i++)
3103 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
3110 if (AddedPotentialArgAccess && MemAttrForPtrArgs) {
3114 I != E; ++I, ++ArgNo) {
3115 if (I->info.isDirect() || I->info.isExpand() ||
3116 I->info.isCoerceAndExpand()) {
3117 unsigned FirstIRArg, NumIRArgs;
3118 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3119 for (
unsigned i = FirstIRArg; i < FirstIRArg + NumIRArgs; ++i) {
3129 AttrList = llvm::AttributeList::get(
3138 llvm::Value *value) {
3139 llvm::Type *varType = CGF.
ConvertType(var->getType());
3143 if (value->getType() == varType)
3146 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) &&
3147 "unexpected promotion type");
3150 return CGF.
Builder.CreateTrunc(value, varType,
"arg.unpromote");
3152 return CGF.
Builder.CreateFPCast(value, varType,
"arg.unpromote");
3158 QualType ArgType,
unsigned ArgNo) {
3166 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
3170 if (
auto ParmNNAttr = PVD->
getAttr<NonNullAttr>())
3177 if (NNAttr->isNonNull(ArgNo))
3184struct CopyBackSwiftError final : EHScopeStack::Cleanup {
3187 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(
arg) {}
3188 void Emit(CodeGenFunction &CGF, Flags flags)
override {
3207 if (FD->hasImplicitReturnZero()) {
3208 QualType RetTy = FD->getReturnType().getUnqualifiedType();
3209 llvm::Type *LLVMTy =
CGM.getTypes().ConvertType(RetTy);
3210 llvm::Constant *
Zero = llvm::Constant::getNullValue(LLVMTy);
3218 ClangToLLVMArgMapping IRFunctionArgs(
CGM.getContext(), FI);
3219 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
3224 if (IRFunctionArgs.hasInallocaArg())
3225 ArgStruct =
Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
3229 if (IRFunctionArgs.hasSRetArg()) {
3230 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
3231 AI->setName(
"agg.result");
3232 AI->addAttr(llvm::Attribute::NoAlias);
3239 ArgVals.reserve(Args.size());
3245 assert(FI.
arg_size() == Args.size() &&
3246 "Mismatch between function signature & arguments.");
3249 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e;
3250 ++i, ++info_it, ++ArgNo) {
3263 unsigned FirstIRArg, NumIRArgs;
3264 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3268 assert(NumIRArgs == 0);
3281 assert(NumIRArgs == 1);
3304 llvm::ConstantInt::get(
IntPtrTy, Size.getQuantity()));
3305 ParamAddr = AlignedTemp;
3322 auto AI = Fn->getArg(FirstIRArg);
3330 assert(NumIRArgs == 1);
3332 if (
const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
3335 PVD->getFunctionScopeIndex()) &&
3336 !
CGM.getCodeGenOpts().NullPointerIsValid)
3337 AI->addAttr(llvm::Attribute::NonNull);
3339 QualType OTy = PVD->getOriginalType();
3340 if (
const auto *ArrTy =
getContext().getAsConstantArrayType(OTy)) {
3346 QualType ETy = ArrTy->getElementType();
3347 llvm::Align Alignment =
3348 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3350 .addAlignmentAttr(Alignment));
3351 uint64_t ArrSize = ArrTy->getZExtSize();
3355 Attrs.addDereferenceableAttr(
3356 getContext().getTypeSizeInChars(ETy).getQuantity() *
3358 AI->addAttrs(Attrs);
3359 }
else if (
getContext().getTargetInfo().getNullPointerValue(
3361 !
CGM.getCodeGenOpts().NullPointerIsValid) {
3362 AI->addAttr(llvm::Attribute::NonNull);
3365 }
else if (
const auto *ArrTy =
3371 QualType ETy = ArrTy->getElementType();
3372 llvm::Align Alignment =
3373 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3375 .addAlignmentAttr(Alignment));
3376 if (!
getTypes().getTargetAddressSpace(ETy) &&
3377 !
CGM.getCodeGenOpts().NullPointerIsValid)
3378 AI->addAttr(llvm::Attribute::NonNull);
3383 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3386 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3387 if (AVAttr && !
SanOpts.has(SanitizerKind::Alignment)) {
3391 llvm::ConstantInt *AlignmentCI =
3393 uint64_t AlignmentInt =
3394 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3395 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3396 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3398 .addAlignmentAttr(llvm::Align(AlignmentInt)));
3405 AI->addAttr(llvm::Attribute::NoAlias);
3413 assert(NumIRArgs == 1);
3417 llvm::Value *
V = AI;
3425 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
3426 llvm::Value *incomingErrorValue =
Builder.CreateLoad(arg);
3427 Builder.CreateStore(incomingErrorValue, temp);
3448 if (
V->getType() != LTy)
3459 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(
ConvertType(Ty))) {
3460 llvm::Value *ArgVal = Fn->getArg(FirstIRArg);
3461 if (
auto *VecTyFrom =
3462 dyn_cast<llvm::ScalableVectorType>(ArgVal->getType())) {
3464 *
this, VecTyTo, VecTyFrom, ArgVal, Arg->
getName());
3466 assert(NumIRArgs == 1);
3473 llvm::StructType *STy =
3484 STy->getNumElements() > 1) {
3485 llvm::TypeSize StructSize =
CGM.getDataLayout().getTypeAllocSize(STy);
3486 llvm::TypeSize PtrElementSize =
3488 if (StructSize.isScalable()) {
3489 assert(STy->containsHomogeneousScalableVectorTypes() &&
3490 "ABI only supports structure with homogeneous scalable vector "
3492 assert(StructSize == PtrElementSize &&
3493 "Only allow non-fractional movement of structure with"
3494 "homogeneous scalable vector type");
3495 assert(STy->getNumElements() == NumIRArgs);
3497 llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
3498 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3499 auto *AI = Fn->getArg(FirstIRArg + i);
3500 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3502 Builder.CreateInsertValue(LoadedStructValue, AI, i);
3505 Builder.CreateStore(LoadedStructValue, Ptr);
3507 uint64_t SrcSize = StructSize.getFixedValue();
3508 uint64_t DstSize = PtrElementSize.getFixedValue();
3511 if (SrcSize <= DstSize) {
3518 assert(STy->getNumElements() == NumIRArgs);
3519 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3520 auto AI = Fn->getArg(FirstIRArg + i);
3521 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3523 Builder.CreateStore(AI, EltPtr);
3526 if (SrcSize > DstSize) {
3527 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
3539 assert(NumIRArgs == 1);
3540 auto AI = Fn->getArg(FirstIRArg);
3541 AI->setName(Arg->
getName() +
".coerce");
3544 llvm::TypeSize::getFixed(
3545 getContext().getTypeSizeInChars(Ty).getQuantity() -
3570 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
3574 unsigned argIndex = FirstIRArg;
3575 unsigned unpaddedIndex = 0;
3576 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3577 llvm::Type *eltType = coercionType->getElementType(i);
3581 auto eltAddr =
Builder.CreateStructGEP(alloca, i);
3582 llvm::Value *elt = Fn->getArg(argIndex++);
3584 auto paramType = unpaddedStruct
3585 ? unpaddedStruct->getElementType(unpaddedIndex++)
3586 : unpaddedCoercionType;
3588 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(eltType)) {
3589 if (
auto *VecTyFrom = dyn_cast<llvm::ScalableVectorType>(paramType)) {
3592 *
this, VecTyTo, VecTyFrom, elt, elt->getName());
3593 assert(Extracted &&
"Unexpected scalable to fixed vector coercion");
3596 Builder.CreateStore(elt, eltAddr);
3598 assert(argIndex == FirstIRArg + NumIRArgs);
3610 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3611 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3612 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3613 for (
unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3614 auto AI = Fn->getArg(FirstIRArg + i);
3615 AI->setName(Arg->
getName() +
"." + Twine(i));
3621 auto *AI = Fn->getArg(FirstIRArg);
3622 AI->setName(Arg->
getName() +
".target_coerce");
3626 CGM.getABIInfo().createCoercedStore(AI, Ptr, ArgI,
false, *
this);
3640 assert(NumIRArgs == 0);
3652 if (
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3653 for (
int I = Args.size() - 1; I >= 0; --I)
3656 for (
unsigned I = 0, E = Args.size(); I != E; ++I)
3662 while (insn->use_empty()) {
3663 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3669 bitcast->eraseFromParent();
3675 llvm::Value *result) {
3677 llvm::BasicBlock *BB = CGF.
Builder.GetInsertBlock();
3680 if (&BB->back() != result)
3683 llvm::Type *resultType = result->getType();
3692 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3698 if (generator->getNextNode() != bitcast)
3701 InstsToKill.push_back(bitcast);
3708 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3712 bool doRetainAutorelease;
3715 doRetainAutorelease =
true;
3716 }
else if (call->getCalledOperand() ==
3718 doRetainAutorelease =
false;
3726 llvm::Instruction *prev = call->getPrevNode();
3729 prev = prev->getPrevNode();
3735 InstsToKill.push_back(prev);
3741 result = call->getArgOperand(0);
3742 InstsToKill.push_back(call);
3746 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3747 if (!bitcast->hasOneUse())
3749 InstsToKill.push_back(bitcast);
3750 result = bitcast->getOperand(0);
3754 for (
auto *I : InstsToKill)
3755 I->eraseFromParent();
3758 if (doRetainAutorelease)
3762 return CGF.
Builder.CreateBitCast(result, resultType);
3767 llvm::Value *result) {
3770 dyn_cast_or_null<ObjCMethodDecl>(CGF.
CurCodeDecl);
3779 llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
3780 if (!retainCall || retainCall->getCalledOperand() !=
3785 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3786 llvm::LoadInst *load =
3787 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3788 if (!load || load->isAtomic() || load->isVolatile() ||
3795 llvm::Type *resultType = result->getType();
3797 assert(retainCall->use_empty());
3798 retainCall->eraseFromParent();
3801 return CGF.
Builder.CreateBitCast(load, resultType);
3808 llvm::Value *result) {
3831 auto GetStoreIfValid = [&CGF,
3832 ReturnValuePtr](llvm::User *
U) -> llvm::StoreInst * {
3833 auto *SI = dyn_cast<llvm::StoreInst>(
U);
3834 if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
3840 assert(!SI->isAtomic() &&
3848 if (!ReturnValuePtr->hasOneUse()) {
3849 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3855 const llvm::Instruction *LoadIntoFakeUse =
nullptr;
3856 for (llvm::Instruction &I : llvm::reverse(*IP)) {
3860 if (LoadIntoFakeUse == &I)
3864 if (
auto *II = dyn_cast<llvm::IntrinsicInst>(&I)) {
3865 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3868 if (II->getIntrinsicID() == llvm::Intrinsic::fake_use) {
3869 LoadIntoFakeUse = dyn_cast<llvm::Instruction>(II->getArgOperand(0));
3873 return GetStoreIfValid(&I);
3878 llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
3884 llvm::BasicBlock *StoreBB = store->getParent();
3885 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3887 while (IP != StoreBB) {
3888 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3904 int BitWidth,
int CharWidth) {
3905 assert(CharWidth <= 64);
3906 assert(
static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3909 if (BitOffset >= CharWidth) {
3910 Pos += BitOffset / CharWidth;
3911 BitOffset = BitOffset % CharWidth;
3914 const uint64_t
Used = (uint64_t(1) << CharWidth) - 1;
3915 if (BitOffset + BitWidth >= CharWidth) {
3916 Bits[Pos++] |= (
Used << BitOffset) &
Used;
3917 BitWidth -= CharWidth - BitOffset;
3921 while (BitWidth >= CharWidth) {
3923 BitWidth -= CharWidth;
3927 Bits[Pos++] |= (
Used >> (CharWidth - BitWidth)) << BitOffset;
3935 int StorageSize,
int BitOffset,
int BitWidth,
3936 int CharWidth,
bool BigEndian) {
3939 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3942 std::reverse(TmpBits.begin(), TmpBits.end());
3944 for (uint64_t
V : TmpBits)
3945 Bits[StorageOffset++] |=
V;
3948static void setUsedBits(CodeGenModule &, QualType,
int,
3949 SmallVectorImpl<uint64_t> &);
3960 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3991 QualType ETy = Context.getBaseElementType(ATy);
3992 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3996 for (
int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3997 auto Src = TmpBits.begin();
3998 auto Dst = Bits.begin() + Offset + I * Size;
3999 for (
int J = 0; J < Size; ++J)
4012 if (
const auto *ATy = Context.getAsConstantArrayType(QTy))
4015 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
4019 std::fill_n(Bits.begin() + Offset, Size,
4020 (uint64_t(1) << Context.getCharWidth()) - 1);
4024 int Pos,
int Size,
int CharWidth,
4029 for (
auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
4031 Mask = (Mask << CharWidth) | *P;
4033 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
4035 Mask = (Mask << CharWidth) | *--P;
4044 llvm::IntegerType *ITy,
4046 assert(Src->getType() == ITy);
4047 assert(ITy->getScalarSizeInBits() <= 64);
4049 const llvm::DataLayout &DataLayout =
CGM.getDataLayout();
4050 int Size = DataLayout.getTypeStoreSize(ITy);
4054 int CharWidth =
CGM.getContext().getCharWidth();
4058 return Builder.CreateAnd(Src, Mask,
"cmse.clear");
4064 llvm::ArrayType *ATy,
4066 const llvm::DataLayout &DataLayout =
CGM.getDataLayout();
4067 int Size = DataLayout.getTypeStoreSize(ATy);
4072 int CharWidth =
CGM.getContext().getCharWidth();
4074 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
4076 llvm::Value *R = llvm::PoisonValue::get(ATy);
4077 for (
int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
4079 DataLayout.isBigEndian());
4080 MaskIndex += CharsPerElt;
4081 llvm::Value *T0 =
Builder.CreateExtractValue(Src, I);
4082 llvm::Value *T1 =
Builder.CreateAnd(T0, Mask,
"cmse.clear");
4083 R =
Builder.CreateInsertValue(R, T1, I);
4091 uint64_t RetKeyInstructionsSourceAtom) {
4106 auto *I =
Builder.CreateRetVoid();
4107 if (RetKeyInstructionsSourceAtom)
4114 llvm::DebugLoc RetDbgLoc;
4115 llvm::Value *RV =
nullptr;
4125 llvm::Function::arg_iterator EI =
CurFn->arg_end();
4127 llvm::Value *ArgStruct = &*EI;
4128 llvm::Value *SRet =
Builder.CreateStructGEP(
4137 auto AI =
CurFn->arg_begin();
4155 CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
4182 RetDbgLoc = SI->getDebugLoc();
4184 RV = SI->getValueOperand();
4185 SI->eraseFromParent();
4208 if (
auto *FD = dyn_cast<FunctionDecl>(
CurCodeDecl))
4209 RT = FD->getReturnType();
4210 else if (
auto *MD = dyn_cast<ObjCMethodDecl>(
CurCodeDecl))
4211 RT = MD->getReturnType();
4213 RT =
BlockInfo->BlockExpression->getFunctionType()->getReturnType();
4215 llvm_unreachable(
"Unexpected function/method type");
4231 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
4236 unsigned unpaddedIndex = 0;
4237 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4238 auto coercedEltType = coercionType->getElementType(i);
4242 auto eltAddr =
Builder.CreateStructGEP(addr, i);
4245 unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
4246 : unpaddedCoercionType,
4248 results.push_back(elt);
4252 if (results.size() == 1) {
4260 RV = llvm::PoisonValue::get(returnType);
4261 for (
unsigned i = 0, e = results.size(); i != e; ++i) {
4262 RV =
Builder.CreateInsertValue(RV, results[i], i);
4269 RV =
CGM.getABIInfo().createCoercedLoad(
V, RetAI, *
this);
4274 llvm_unreachable(
"Invalid ABI kind for return argument");
4277 llvm::Instruction *Ret;
4283 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
4290 Ret =
Builder.CreateRetVoid();
4294 Ret->setDebugLoc(std::move(RetDbgLoc));
4296 llvm::Value *Backup = RV ? Ret->getOperand(0) :
nullptr;
4297 if (RetKeyInstructionsSourceAtom)
4313 ReturnsNonNullAttr *RetNNAttr =
nullptr;
4314 if (
SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
4315 RetNNAttr =
CurCodeDecl->getAttr<ReturnsNonNullAttr>();
4317 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
4325 assert(!requiresReturnValueNullabilityCheck() &&
4326 "Cannot check nullability and the nonnull attribute");
4327 AttrLoc = RetNNAttr->getLocation();
4328 CheckKind = SanitizerKind::SO_ReturnsNonnullAttribute;
4329 Handler = SanitizerHandler::NonnullReturn;
4331 if (
auto *DD = dyn_cast<DeclaratorDecl>(
CurCodeDecl))
4332 if (
auto *TSI = DD->getTypeSourceInfo())
4334 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
4335 CheckKind = SanitizerKind::SO_NullabilityReturn;
4336 Handler = SanitizerHandler::NullabilityReturn;
4345 llvm::Value *SLocPtr =
Builder.CreateLoad(ReturnLocation,
"return.sloc.load");
4346 llvm::Value *CanNullCheck =
Builder.CreateIsNotNull(SLocPtr);
4347 if (requiresReturnValueNullabilityCheck())
4349 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
4350 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
4356 llvm::Value *DynamicData[] = {SLocPtr};
4357 EmitCheck(std::make_pair(
Cond, CheckKind), Handler, StaticData, DynamicData);
4376 llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.
getLLVMContext());
4377 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
4402 if (
type->isReferenceType()) {
4411 param->
hasAttr<NSConsumedAttr>() &&
type->isObjCRetainableType()) {
4412 llvm::Value *ptr =
Builder.CreateLoad(local);
4415 Builder.CreateStore(null, local);
4426 type->castAsRecordDecl()->isParamDestroyedInCallee() &&
4431 "cleanup for callee-destructed param not recorded");
4433 llvm::Instruction *isActive =
Builder.CreateUnreachable();
4439 return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
4449 const LValue &srcLV = writeback.
Source;
4450 Address srcAddr = srcLV.getAddress();
4452 "shouldn't have writeback for provably null argument");
4460 llvm::BasicBlock *contBB =
nullptr;
4466 if (!provablyNonNull) {
4471 CGF.
Builder.CreateCondBr(isNull, contBB, writebackBB);
4480 "icr.writeback-cast");
4489 if (writeback.
ToUse) {
4514 if (!provablyNonNull)
4523 for (
const auto &I : llvm::reverse(Cleanups)) {
4525 I.IsActiveIP->eraseFromParent();
4531 if (uop->getOpcode() == UO_AddrOf)
4532 return uop->getSubExpr();
4557 Address srcAddr = srcLV.getAddress();
4562 llvm::PointerType *destType =
4564 llvm::Type *destElemType =
4591 llvm::BasicBlock *contBB =
nullptr;
4592 llvm::BasicBlock *originBB =
nullptr;
4595 llvm::Value *finalArgument;
4599 if (provablyNonNull) {
4604 finalArgument = CGF.
Builder.CreateSelect(
4605 isNull, llvm::ConstantPointerNull::get(destType),
4611 originBB = CGF.
Builder.GetInsertBlock();
4614 CGF.
Builder.CreateCondBr(isNull, contBB, copyBB);
4616 condEval.
begin(CGF);
4620 llvm::Value *valueToUse =
nullptr;
4628 src = CGF.
Builder.CreateBitCast(src, destElemType,
"icr.cast");
4645 if (shouldCopy && !provablyNonNull) {
4646 llvm::BasicBlock *copyBB = CGF.
Builder.GetInsertBlock();
4651 llvm::PHINode *phiToUse =
4652 CGF.
Builder.CreatePHI(valueToUse->getType(), 2,
"icr.to-use");
4653 phiToUse->addIncoming(valueToUse, copyBB);
4654 phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),
4656 valueToUse = phiToUse;
4670 StackBase = CGF.
Builder.CreateStackSave(
"inalloca.save");
4676 CGF.
Builder.CreateStackRestore(StackBase);
4683 if (!AC.
getDecl() || !(
SanOpts.has(SanitizerKind::NonnullAttribute) ||
4684 SanOpts.has(SanitizerKind::NullabilityArg)))
4689 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4692 const NonNullAttr *NNAttr =
nullptr;
4693 if (
SanOpts.has(SanitizerKind::NonnullAttribute))
4696 bool CanCheckNullability =
false;
4697 if (
SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
4698 !PVD->getType()->isRecordType()) {
4699 auto Nullability = PVD->getType()->getNullability();
4700 CanCheckNullability = Nullability &&
4702 PVD->getTypeSourceInfo();
4705 if (!NNAttr && !CanCheckNullability)
4712 AttrLoc = NNAttr->getLocation();
4713 CheckKind = SanitizerKind::SO_NonnullAttribute;
4714 Handler = SanitizerHandler::NonnullArg;
4716 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4717 CheckKind = SanitizerKind::SO_NullabilityArg;
4718 Handler = SanitizerHandler::NullabilityArg;
4723 llvm::Constant *StaticData[] = {
4726 llvm::ConstantInt::get(
Int32Ty, ArgNo + 1),
4728 EmitCheck(std::make_pair(
Cond, CheckKind), Handler, StaticData, {});
4734 if (!AC.
getDecl() || !(
SanOpts.has(SanitizerKind::NonnullAttribute) ||
4735 SanOpts.has(SanitizerKind::NullabilityArg)))
4754 return llvm::any_of(ArgTypes, [&](
QualType Ty) {
4765 return classDecl->getTypeParamListAsWritten();
4769 return catDecl->getTypeParamList();
4779 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4783 assert((ParamsToSkip == 0 ||
Prototype.P) &&
4784 "Can't skip parameters if type info is not provided");
4794 bool IsVariadic =
false;
4796 const auto *MD = dyn_cast<const ObjCMethodDecl *>(
Prototype.P);
4798 IsVariadic = MD->isVariadic();
4800 MD,
CGM.getTarget().getTriple().isOSWindows());
4801 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4802 MD->param_type_end());
4805 IsVariadic = FPT->isVariadic();
4806 ExplicitCC = FPT->getExtInfo().getCC();
4807 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4808 FPT->param_type_end());
4816 assert(Arg != ArgRange.end() &&
"Running over edge of argument list!");
4818 QualType ArgTy = (*Arg)->getType();
4819 if (
const auto *OBT = ParamTy->
getAs<OverflowBehaviorType>())
4820 ParamTy = OBT->getUnderlyingType();
4821 if (
const auto *OBT = ArgTy->
getAs<OverflowBehaviorType>())
4822 ArgTy = OBT->getUnderlyingType();
4825 getContext().getCanonicalType(ParamTy).getTypePtr() ==
4826 getContext().getCanonicalType(ArgTy).getTypePtr()) &&
4827 "type mismatch in call argument!");
4833 assert((Arg == ArgRange.end() || IsVariadic) &&
4834 "Extra arguments in non-variadic function!");
4839 for (
auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4840 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4841 assert((
int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4849 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4853 auto MaybeEmitImplicitObjectSize = [&](
unsigned I,
const Expr *Arg,
4862 auto SizeTy = Context.getSizeType();
4864 assert(EmittedArg.getScalarVal() &&
"We emitted nothing for the arg?");
4865 llvm::Value *
V = evaluateOrEmitBuiltinObjectSize(
4866 Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic());
4871 std::swap(Args.back(), *(&Args.back() - 1));
4877 "inalloca only supported on x86");
4882 size_t CallArgsStart = Args.size();
4883 for (
unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4884 unsigned Idx = LeftToRight ? I : E - I - 1;
4886 unsigned InitialArgSize = Args.size();
4890 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4894 "Argument and parameter types don't match");
4898 assert(InitialArgSize + 1 == Args.size() &&
4899 "The code below depends on only adding one arg per EmitCallArg");
4900 (void)InitialArgSize;
4903 if (!Args.back().hasLValue()) {
4904 RValue RVArg = Args.back().getKnownRValue();
4906 ParamsToSkip + Idx);
4910 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4917 std::reverse(Args.begin() + CallArgsStart, Args.end());
4926struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4959 if (!HasLV &&
RV.isScalar())
4961 else if (!HasLV &&
RV.isComplex())
4964 auto Addr = HasLV ?
LV.getAddress() :
RV.getAggregateAddress();
4968 HasLV ?
LV.isVolatileQualified()
4969 :
RV.isVolatileQualified());
4981 std::optional<DisableDebugLocationUpdates> Dis;
4985 dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4999 "reference binding to unmaterialized r-value!");
5011 if (
type->isRecordType() &&
5012 type->castAsRecordDecl()->isParamDestroyedInCallee()) {
5019 bool DestroyedInCallee =
true, NeedsCleanup =
true;
5020 if (
const auto *RD =
type->getAsCXXRecordDecl())
5021 DestroyedInCallee = RD->hasNonTrivialDestructor();
5023 NeedsCleanup =
type.isDestructedType();
5025 if (DestroyedInCallee)
5032 if (DestroyedInCallee && NeedsCleanup) {
5039 llvm::Instruction *IsActive =
5048 !
type->isArrayParameterType() && !
type.isNonTrivialToPrimitiveCopy()) {
5058QualType CodeGenFunction::getVarArgType(
const Expr *Arg) {
5062 if (!getTarget().
getTriple().isOSWindows())
5066 getContext().getTypeSize(Arg->
getType()) <
5070 return getContext().getIntPtrType();
5078void CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
5079 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
5080 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
5081 Inst->setMetadata(
"clang.arc.no_objc_arc_exceptions",
5082 CGM.getNoObjCARCExceptionsMetadata());
5088 const llvm::Twine &name) {
5089 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value *>(), name);
5095 ArrayRef<Address> args,
5096 const llvm::Twine &name) {
5097 SmallVector<llvm::Value *, 3> values;
5098 for (
auto arg : args)
5099 values.push_back(
arg.emitRawPointer(*
this));
5100 return EmitNounwindRuntimeCall(callee, values, name);
5105 ArrayRef<llvm::Value *> args,
5106 const llvm::Twine &name) {
5107 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
5108 call->setDoesNotThrow();
5115 const llvm::Twine &name) {
5116 return EmitRuntimeCall(callee, {},
name);
5121SmallVector<llvm::OperandBundleDef, 1>
5130 if (
auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
5131 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
5132 auto IID = CalleeFn->getIntrinsicID();
5133 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
5146 const llvm::Twine &name) {
5147 llvm::CallInst *call = Builder.CreateCall(
5148 callee, args, getBundlesForFunclet(callee.getCallee()), name);
5149 call->setCallingConv(getRuntimeCC());
5151 if (CGM.shouldEmitConvergenceTokens() && call->isConvergent())
5163 llvm::InvokeInst *invoke =
Builder.CreateInvoke(
5165 invoke->setDoesNotReturn();
5168 llvm::CallInst *call =
Builder.CreateCall(callee, args, BundleList);
5169 call->setDoesNotReturn();
5178 const Twine &name) {
5186 const Twine &name) {
5196 const Twine &Name) {
5201 llvm::CallBase *Inst;
5203 Inst =
Builder.CreateCall(Callee, Args, BundleList, Name);
5206 Inst =
Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
5213 if (
CGM.getLangOpts().ObjCAutoRefCount)
5214 AddObjCARCExceptionMetadata(Inst);
5219void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
5221 DeferredReplacements.push_back(
5222 std::make_pair(llvm::WeakTrackingVH(Old),
New));
5229[[nodiscard]] llvm::AttributeList
5230maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
5231 const llvm::AttributeList &Attrs,
5232 llvm::Align NewAlign) {
5233 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
5234 if (CurAlign >= NewAlign)
5236 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
5237 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
5238 .addRetAttribute(Ctx, AlignAttr);
5241template <
typename AlignedAttrTy>
class AbstractAssumeAlignedAttrEmitter {
5246 const AlignedAttrTy *AA =
nullptr;
5248 llvm::Value *Alignment =
nullptr;
5249 llvm::ConstantInt *OffsetCI =
nullptr;
5255 AA = FuncDecl->
getAttr<AlignedAttrTy>();
5260 [[nodiscard]] llvm::AttributeList
5261 TryEmitAsCallSiteAttribute(
const llvm::AttributeList &Attrs) {
5262 if (!AA || OffsetCI || CGF.
SanOpts.
has(SanitizerKind::Alignment))
5264 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
5269 if (!AlignmentCI->getValue().isPowerOf2())
5271 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
5274 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
5282 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
5286 AA->getLocation(), Alignment, OffsetCI);
5292class AssumeAlignedAttrEmitter final
5293 :
public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
5295 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_,
const Decl *FuncDecl)
5296 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5301 if (Expr *Offset = AA->getOffset()) {
5303 if (OffsetCI->isNullValue())
5310class AllocAlignAttrEmitter final
5311 :
public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
5313 AllocAlignAttrEmitter(CodeGenFunction &CGF_,
const Decl *FuncDecl,
5314 const CallArgList &CallArgs)
5315 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5319 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
5328 if (
auto *VT = dyn_cast<llvm::VectorType>(Ty))
5329 return VT->getPrimitiveSizeInBits().getKnownMinValue();
5330 if (
auto *AT = dyn_cast<llvm::ArrayType>(Ty))
5333 unsigned MaxVectorWidth = 0;
5334 if (
auto *ST = dyn_cast<llvm::StructType>(Ty))
5335 for (
auto *I : ST->elements())
5337 return MaxVectorWidth;
5344 llvm::CallBase **callOrInvoke,
bool IsMustTail,
5346 bool IsVirtualFunctionPointerThunk) {
5349 assert(Callee.isOrdinary() || Callee.isVirtual());
5356 llvm::FunctionType *IRFuncTy =
getTypes().GetFunctionType(CallInfo);
5358 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
5359 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5366 if ((TargetDecl->
hasAttr<AlwaysInlineAttr>() &&
5367 (TargetDecl->
hasAttr<TargetAttr>() ||
5371 TargetDecl->
hasAttr<TargetAttr>())))
5378 const FunctionDecl *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl);
5379 CGM.getTargetCodeGenInfo().checkFunctionCallABI(
CGM, Loc, CallerDecl,
5380 CalleeDecl, CallArgs, RetTy);
5387 if (llvm::StructType *ArgStruct = CallInfo.
getArgStruct()) {
5388 const llvm::DataLayout &DL =
CGM.getDataLayout();
5390 llvm::AllocaInst *AI;
5392 IP = IP->getNextNode();
5393 AI =
new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
"argmem",
5399 AI->setAlignment(Align.getAsAlign());
5400 AI->setUsedWithInAlloca(
true);
5401 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5402 ArgMemory =
RawAddress(AI, ArgStruct, Align);
5405 ClangToLLVMArgMapping IRFunctionArgs(
CGM.getContext(), CallInfo);
5411 bool NeedSRetLifetimeEnd =
false;
5417 if ((IsVirtualFunctionPointerThunk || IsMustTail) && RetAI.
isIndirect()) {
5419 IRFunctionArgs.getSRetArgNo(),
5428 if (IRFunctionArgs.hasSRetArg()) {
5441 IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
5459 assert(CallInfo.
arg_size() == CallArgs.size() &&
5460 "Mismatch between function signature & arguments.");
5463 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5464 I != E; ++I, ++info_it, ++ArgNo) {
5468 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5469 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5472 unsigned FirstIRArg, NumIRArgs;
5473 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5475 bool ArgHasMaybeUndefAttr =
5480 assert(NumIRArgs == 0);
5482 if (I->isAggregate()) {
5484 ? I->getKnownLValue().getAddress()
5485 : I->getKnownRValue().getAggregateAddress();
5486 llvm::Instruction *Placeholder =
5491 CGBuilderTy::InsertPoint IP =
Builder.saveIP();
5492 Builder.SetInsertPoint(Placeholder);
5505 deferPlaceholderReplacement(Placeholder,
Addr.getPointer());
5510 I->Ty,
getContext().getTypeAlignInChars(I->Ty),
5511 "indirect-arg-temp");
5512 I->copyInto(*
this,
Addr);
5521 I->copyInto(*
this,
Addr);
5528 assert(NumIRArgs == 1);
5529 if (I->isAggregate()) {
5539 ? I->getKnownLValue().getAddress()
5540 : I->getKnownRValue().getAggregateAddress();
5542 const llvm::DataLayout *TD = &
CGM.getDataLayout();
5544 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5545 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5546 TD->getAllocaAddrSpace()) &&
5547 "indirect argument must be in alloca address space");
5549 bool NeedCopy =
false;
5550 if (
Addr.getAlignment() < Align &&
5551 llvm::getOrEnforceKnownAlignment(
Addr.emitRawPointer(*
this),
5555 }
else if (I->hasLValue()) {
5556 auto LV = I->getKnownLValue();
5561 if (!isByValOrRef ||
5562 (LV.getAlignment() <
getContext().getTypeAlignInChars(I->Ty))) {
5566 if (isByValOrRef &&
Addr.getType()->getAddressSpace() !=
5575 auto *T = llvm::PointerType::get(
CGM.getLLVMContext(),
5583 if (ArgHasMaybeUndefAttr)
5584 Val =
Builder.CreateFreeze(Val);
5585 IRCallArgs[FirstIRArg] = Val;
5588 }
else if (I->getType()->isArrayParameterType()) {
5594 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5603 if (ArgHasMaybeUndefAttr)
5604 Val =
Builder.CreateFreeze(Val);
5605 IRCallArgs[FirstIRArg] = Val;
5610 CallLifetimeEndAfterCall.emplace_back(AI);
5613 I->copyInto(*
this, AI);
5618 assert(NumIRArgs == 0);
5626 assert(NumIRArgs == 1);
5628 if (!I->isAggregate())
5629 V = I->getKnownRValue().getScalarVal();
5632 I->hasLValue() ? I->getKnownLValue().getAddress()
5633 : I->getKnownRValue().getAggregateAddress());
5639 assert(!swiftErrorTemp.
isValid() &&
"multiple swifterror args");
5643 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
5650 llvm::Value *errorValue =
Builder.CreateLoad(swiftErrorArg);
5651 Builder.CreateStore(errorValue, swiftErrorTemp);
5656 V->getType()->isIntegerTy())
5663 if (FirstIRArg < IRFuncTy->getNumParams() &&
5664 V->getType() != IRFuncTy->getParamType(FirstIRArg)) {
5665 assert(
V->getType()->isPointerTy() &&
"Only pointers can mismatch!");
5669 if (ArgHasMaybeUndefAttr)
5671 IRCallArgs[FirstIRArg] =
V;
5675 llvm::StructType *STy =
5680 if (!I->isAggregate()) {
5682 I->copyInto(*
this, Src);
5684 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5685 : I->getKnownRValue().getAggregateAddress();
5695 llvm::TypeSize SrcTypeSize =
5696 CGM.getDataLayout().getTypeAllocSize(SrcTy);
5697 llvm::TypeSize DstTypeSize =
CGM.getDataLayout().getTypeAllocSize(STy);
5698 if (SrcTypeSize.isScalable()) {
5699 assert(STy->containsHomogeneousScalableVectorTypes() &&
5700 "ABI only supports structure with homogeneous scalable vector "
5702 assert(SrcTypeSize == DstTypeSize &&
5703 "Only allow non-fractional movement of structure with "
5704 "homogeneous scalable vector type");
5705 assert(NumIRArgs == STy->getNumElements());
5707 llvm::Value *StoredStructValue =
5709 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5710 llvm::Value *Extract =
Builder.CreateExtractValue(
5711 StoredStructValue, i, Src.
getName() +
".extract" + Twine(i));
5712 IRCallArgs[FirstIRArg + i] = Extract;
5715 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5716 uint64_t DstSize = DstTypeSize.getFixedValue();
5717 bool HasPFPFields =
getContext().hasPFPFields(I->Ty);
5723 if (HasPFPFields || SrcSize < DstSize) {
5734 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
5740 assert(NumIRArgs == STy->getNumElements());
5741 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5743 llvm::Value *LI =
Builder.CreateLoad(EltPtr);
5744 if (ArgHasMaybeUndefAttr)
5745 LI =
Builder.CreateFreeze(LI);
5746 IRCallArgs[FirstIRArg + i] = LI;
5751 assert(NumIRArgs == 1);
5759 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5764 if (ArgHasMaybeUndefAttr)
5765 Load =
Builder.CreateFreeze(Load);
5766 IRCallArgs[FirstIRArg] = Load;
5774 auto layout =
CGM.getDataLayout().getStructLayout(coercionType);
5776 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
5780 bool NeedLifetimeEnd =
false;
5781 if (I->isAggregate()) {
5782 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5783 : I->getKnownRValue().getAggregateAddress();
5786 RValue RV = I->getKnownRValue();
5790 auto scalarAlign =
CGM.getDataLayout().getPrefTypeAlign(scalarType);
5795 layout->getAlignment(), scalarAlign)),
5797 nullptr, &AllocaAddr);
5805 unsigned IRArgPos = FirstIRArg;
5806 unsigned unpaddedIndex = 0;
5807 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5808 llvm::Type *eltType = coercionType->getElementType(i);
5815 : unpaddedCoercionType,
5817 if (ArgHasMaybeUndefAttr)
5818 elt =
Builder.CreateFreeze(elt);
5819 IRCallArgs[IRArgPos++] = elt;
5821 assert(IRArgPos == FirstIRArg + NumIRArgs);
5823 if (NeedLifetimeEnd)
5829 unsigned IRArgPos = FirstIRArg;
5830 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5831 assert(IRArgPos == FirstIRArg + NumIRArgs);
5837 if (!I->isAggregate()) {
5839 I->copyInto(*
this, Src);
5841 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5842 : I->getKnownRValue().getAggregateAddress();
5848 CGM.getABIInfo().createCoercedLoad(Src, ArgInfo, *
this);
5849 IRCallArgs[FirstIRArg] = Load;
5855 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*
this);
5861 assert(IRFunctionArgs.hasInallocaArg());
5862 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5873 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5874 llvm::Value *Ptr) -> llvm::Function * {
5875 if (!CalleeFT->isVarArg())
5879 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5880 if (CE->getOpcode() == llvm::Instruction::BitCast)
5881 Ptr = CE->getOperand(0);
5884 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5888 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5892 if (OrigFT->isVarArg() ||
5893 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5894 OrigFT->getReturnType() != CalleeFT->getReturnType())
5897 for (
unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5898 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5904 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5906 IRFuncTy = OrigFn->getFunctionType();
5917 for (
unsigned i = 0; i < IRCallArgs.size(); ++i)
5918 LargestVectorWidth = std::max(LargestVectorWidth,
5923 llvm::AttributeList Attrs;
5924 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5929 if (
CallingConv == llvm::CallingConv::X86_VectorCall &&
5931 CGM.Error(Loc,
"__vectorcall calling convention is not currently "
5936 if (FD->hasAttr<StrictFPAttr>())
5938 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5943 if (FD->hasAttr<OptimizeNoneAttr>() &&
getLangOpts().FastMath)
5944 CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(),
5949 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoMerge);
5953 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5958 !
CGM.getTargetCodeGenInfo().wouldInliningViolateFunctionCallABI(
5959 CallerDecl, CalleeDecl))
5961 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5966 Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Convergent);
5975 !(TargetDecl && TargetDecl->
hasAttr<NoInlineAttr>()) &&
5976 !
CGM.getTargetCodeGenInfo().wouldInliningViolateFunctionCallABI(
5977 CallerDecl, CalleeDecl)) {
5979 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5984 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5991 CannotThrow =
false;
6000 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
6002 if (
auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
6003 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
6011 if (NeedSRetLifetimeEnd)
6019 if (
SanOpts.has(SanitizerKind::KCFI) &&
6020 !isa_and_nonnull<FunctionDecl>(TargetDecl))
6027 if (FD->hasAttr<StrictFPAttr>())
6029 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
6031 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*
this, TargetDecl);
6032 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
6034 AllocAlignAttrEmitter AllocAlignAttrEmitter(*
this, TargetDecl, CallArgs);
6035 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
6040 CI =
Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
6043 CI =
Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
6047 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
6048 CI->getCalledFunction()->getName().starts_with(
"_Z4sqrt")) {
6053 if (
CGM.getCodeGenOpts().CallGraphSection) {
6057 else if (
const auto *FPT =
6058 Callee.getAbstractInfo().getCalleeFunctionProtoType())
6062 "Cannot find the callee type to generate callee_type metadata.");
6066 CGM.createCalleeTypeMetadataForIcall(CST, *callOrInvoke);
6073 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(
CurFuncDecl)) {
6074 if (
const auto *A = FD->getAttr<CFGuardAttr>()) {
6075 if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
6076 !CI->getCalledFunction())
6082 CI->setAttributes(Attrs);
6083 CI->setCallingConv(
static_cast<llvm::CallingConv::ID
>(
CallingConv));
6087 if (!CI->getType()->isVoidTy())
6088 CI->setName(
"call");
6090 if (
CGM.shouldEmitConvergenceTokens() && CI->isConvergent())
6091 CI = addConvergenceControlToken(CI);
6094 LargestVectorWidth =
6100 if (!CI->getCalledFunction())
6101 PGO->valueProfile(
Builder, llvm::IPVK_IndirectCallTarget, CI, CalleePtr);
6105 if (
CGM.getLangOpts().ObjCAutoRefCount)
6106 AddObjCARCExceptionMetadata(CI);
6109 bool IsPPC =
getTarget().getTriple().isPPC();
6110 bool IsMIPS =
getTarget().getTriple().isMIPS();
6111 bool HasMips16 =
false;
6114 HasMips16 = TargetOpts.
FeatureMap.lookup(
"mips16");
6116 HasMips16 = llvm::is_contained(TargetOpts.
Features,
"+mips16");
6118 if (llvm::CallInst *
Call = dyn_cast<llvm::CallInst>(CI)) {
6119 if (TargetDecl && TargetDecl->
hasAttr<NotTailCalledAttr>())
6120 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
6121 else if (IsMustTail) {
6124 CGM.getDiags().Report(Loc, diag::err_aix_musttail_unsupported);
6127 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 0;
6128 else if (
Call->isIndirectCall())
6129 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 1;
6130 else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
6135 CGM.addUndefinedGlobalForTailCall(
6138 llvm::GlobalValue::LinkageTypes
Linkage =
CGM.getFunctionLinkage(
6140 if (llvm::GlobalValue::isWeakForLinker(
Linkage) ||
6141 llvm::GlobalValue::isDiscardableIfUnused(
Linkage))
6142 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail)
6150 CGM.getDiags().Report(Loc, diag::err_mips_impossible_musttail) << 0;
6151 else if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
6152 CGM.addUndefinedGlobalForTailCall({FD, Loc});
6154 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
6168 bool NeedSrcLoc = TargetDecl->
hasAttr<ErrorAttr>();
6169 if (!NeedSrcLoc &&
CGM.getCodeGenOpts().ShowInliningChain) {
6170 if (
const auto *FD = dyn_cast<FunctionDecl>(TargetDecl))
6171 NeedSrcLoc = FD->isInlined() || FD->hasAttr<AlwaysInlineAttr>() ||
6173 FD->isInAnonymousNamespace();
6177 auto *MD = llvm::ConstantAsMetadata::get(
Line);
6178 CI->setMetadata(
"srcloc", llvm::MDNode::get(
getLLVMContext(), {MD}));
6187 if (CI->doesNotReturn()) {
6188 if (NeedSRetLifetimeEnd)
6192 if (
SanOpts.has(SanitizerKind::Unreachable)) {
6195 if (
auto *F = CI->getCalledFunction())
6196 F->removeFnAttr(llvm::Attribute::NoReturn);
6197 CI->removeFnAttr(llvm::Attribute::NoReturn);
6201 if (
SanOpts.hasOneOf(SanitizerKind::Address |
6202 SanitizerKind::KernelAddress)) {
6204 llvm::IRBuilder<>::InsertPointGuard IPGuard(
Builder);
6206 auto *FnType = llvm::FunctionType::get(
CGM.VoidTy,
false);
6207 llvm::FunctionCallee Fn =
6208 CGM.CreateRuntimeFunction(FnType,
"__asan_handle_no_return");
6214 Builder.ClearInsertionPoint();
6233 if (Cleanup && Cleanup->isFakeUse()) {
6234 CGBuilderTy::InsertPointGuard IPG(
Builder);
6236 Cleanup->getCleanup()->Emit(*
this, EHScopeStack::Cleanup::Flags());
6237 }
else if (!(Cleanup &&
6238 Cleanup->getCleanup()->isRedundantBeforeReturn())) {
6239 CGM.ErrorUnsupported(
MustTailCall,
"tail call skipping over cleanups");
6242 if (CI->getType()->isVoidTy())
6246 Builder.ClearInsertionPoint();
6252 if (swiftErrorTemp.
isValid()) {
6253 llvm::Value *errorResult =
Builder.CreateLoad(swiftErrorTemp);
6254 Builder.CreateStore(errorResult, swiftErrorArg);
6271 if (IsVirtualFunctionPointerThunk) {
6284 unsigned unpaddedIndex = 0;
6285 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
6286 llvm::Type *eltType = coercionType->getElementType(i);
6290 llvm::Value *elt = CI;
6291 if (requiresExtract)
6292 elt =
Builder.CreateExtractValue(elt, unpaddedIndex++);
6294 assert(unpaddedIndex == 0);
6295 Builder.CreateStore(elt, eltAddr);
6303 if (NeedSRetLifetimeEnd)
6320 llvm::Value *Real =
Builder.CreateExtractValue(CI, 0);
6321 llvm::Value *Imag =
Builder.CreateExtractValue(CI, 1);
6329 llvm::Value *
V = CI;
6330 if (
V->getType() != RetIRTy)
6340 if (
auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
6341 llvm::Value *
V = CI;
6342 if (
auto *ScalableSrcTy =
6343 dyn_cast<llvm::ScalableVectorType>(
V->getType())) {
6344 if (FixedDstTy->getElementType() ==
6345 ScalableSrcTy->getElementType()) {
6346 V =
Builder.CreateExtractVector(FixedDstTy,
V, uint64_t(0),
6356 getContext().getTypeInfoDataSizeInChars(RetTy).Width.getQuantity();
6360 DestIsVolatile =
false;
6361 DestSize =
getContext().getTypeSizeInChars(RetTy).getQuantity();
6371 CI, RetTy, StorePtr,
6385 DestIsVolatile =
false;
6387 CGM.getABIInfo().createCoercedStore(CI, StorePtr, RetAI, DestIsVolatile,
6394 llvm_unreachable(
"Invalid ABI kind for return argument");
6397 llvm_unreachable(
"Unhandled ABIArgInfo::Kind");
6402 if (Ret.isScalar() && TargetDecl) {
6403 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6404 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6410 LifetimeEnd.Emit(*
this, {});
6422 if (CalleeDecl && !CalleeDecl->
hasAttr<NoDebugAttr>() &&
6423 DI->getCallSiteRelatedAttrs() != llvm::DINode::FlagZero) {
6424 CodeGenFunction CalleeCGF(
CGM);
6426 Callee.getAbstractInfo().getCalleeDecl();
6427 CalleeCGF.
CurGD = CalleeGlobalDecl;
6430 DI->EmitFuncDeclForCallSite(
6431 CI, DI->getFunctionType(CalleeDecl, ResTy, Args), CalleeGlobalDecl);
6434 DI->addCallTargetIfVirtual(CalleeDecl, CI);
6460 if (
VE->isMicrosoftABI())
6461 return CGM.getABIInfo().EmitMSVAArg(*
this, VAListAddr, Ty, Slot);
6462 return CGM.getABIInfo().EmitVAArg(*
this, VAListAddr, Ty, Slot);
6467 CGF.disableDebugInfo();
6471 CGF.enableDebugInfo();
static ExtParameterInfoList getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
static CanQualTypeList getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsTargetDefaultMSABI)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from \TargetOpts and \F, and sets the result in \FuncAttr.
static llvm::Value * CreatePFPCoercedLoad(Address Src, QualType SrcFETy, llvm::Type *Ty, CodeGenFunction &CGF)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
SmallVector< CanQualType, 16 > CanQualTypeList
static std::pair< llvm::Value *, bool > CoerceScalableToFixed(CodeGenFunction &CGF, llvm::FixedVectorType *ToTy, llvm::ScalableVectorType *FromTy, llvm::Value *V, StringRef Name="")
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
static llvm::Value * CreateCoercedLoad(Address Src, QualType SrcFETy, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
SmallVector< FunctionProtoType::ExtParameterInfo, 16 > ExtParameterInfoList
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static CanQualTypeList getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
static bool CreatePFPCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst, CodeGenFunction &CGF)
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
static void addNoBuiltinAttributes(mlir::MLIRContext &ctx, mlir::NamedAttrList &attrs, const LangOptions &langOpts, const NoBuiltinAttr *nba=nullptr)
static void addDenormalModeAttrs(llvm::DenormalMode fpDenormalMode, llvm::DenormalMode fp32DenormalMode, mlir::NamedAttrList &attrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void appendParameterTypes(const CIRGenTypes &cgt, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > fpt)
Adds the formal parameters in FPT to the given prefix.
static const CIRGenFunctionInfo & arrangeFreeFunctionLikeCall(CIRGenTypes &cgt, CIRGenModule &cgm, const CallArgList &args, const FunctionType *fnType)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
#define CC_VLS_CASE(ABI_VLEN)
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static StringRef getTriple(const Command &Job)
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
CanQualType getCanonicalSizeType() const
const TargetInfo & getTargetInfo() const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
std::vector< PFPField > findPFPFields(QualType Ty) const
Returns a list of PFP fields for the given type, including subfields in bases or other fields,...
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
This class is used for builtin types like 'int'.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
unsigned getNumBases() const
Retrieves the number of base classes of this class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const
ConstExprIterator const_arg_iterator
Represents a canonical, potentially-qualified type.
static CanQual< Type > CreateUnsafe(QualType Other)
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ TargetSpecific
TargetSpecific - Some argument types are passed as target specific types such as RISC-V's tuple type,...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
unsigned getAddressSpace() const
Return the address space that this address resides in.
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
llvm::StringRef getName() const
Return the IR name of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
const ArgInfo * const_arg_iterator
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr)
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
An abstract representation of regular/ObjC call/message targets.
const ParmVarDecl * getParamDecl(unsigned I) const
const Decl * getDecl() const
unsigned getNumParams() const
bool hasFunctionDecl() const
An object to manage conditionally-evaluated expressions.
void begin(CodeGenFunction &CGF)
void end(CodeGenFunction &CGF)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
Emits a call or invoke to the given noreturn runtime function.
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
void callCStructDestructor(LValue Dst)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom)
See CGDebugInfo::addInstToSpecificSourceAtom.
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
const CodeGen::CGBlockInfo * BlockInfo
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void CreateCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to Dst from Src.
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetInfo & getTarget() const
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
void EmitWritebacks(const CallArgList &Args)
EmitWriteback - Emit callbacks for function.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
llvm::BasicBlock * getInvokeDest()
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
CGDebugInfo * getDebugInfo()
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
void EmitLifetimeEnd(llvm::Value *Addr)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitAddressOfPFPField(Address RecordPtr, const PFPField &Field)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
llvm::Type * ConvertTypeForMem(QualType T)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc, uint64_t RetKeyInstructionsSourceAtom)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CallExpr * MustTailCall
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
This class organizes the cross-function state that is used while generating LLVM code.
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
ObjCEntrypoints & getObjCEntrypoints() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeDeviceKernelCallerDeclaration(QualType resultType, const FunctionArgList &args)
A device kernel caller function is an offload device entry point function with a target device depend...
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeFunctionDeclaration(const GlobalDecl GD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
A cleanup scope which generates the cleanup blocks lazily.
A saved depth on the scope stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
static void initPointerAuthFnAttributes(const PointerAuthOptions &Opts, llvm::AttrBuilder &FuncAttrs)
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
const FunctionType * getFunctionType(bool BlocksToo=true) const
Looks through the Decl's underlying type to extract a FunctionType when possible.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
bool isZeroLengthBitField() const
Is this a zero-length bit-field?
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
@ SME_AgnosticZAStateMask
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
KernelReferenceKind getKernelReferenceKind() const
CXXDtorType getDtorType() const
const Decl * getDecl() const
This class represents temporary values used to represent inout and out arguments in HLSL.
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
bool isNull() const
Return true if this QualType doesn't point to a type yet.
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
field_iterator field_end() const
bool isParamDestroyedInCallee() const
field_iterator field_begin() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
llvm::StringMap< bool > FeatureMap
The map of which features have been enabled or disabled based on the command line.
bool isIncompleteArrayType() const
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
RecordDecl * castAsRecordDecl() const
bool isMemberPointerType() const
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
const T * getAs() const
Member-template getAs<specific type>'.
bool isNullPtrType() const
bool isRecordType() const
bool isObjCRetainableType() const
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool This(InterpState &S, CodePtr OpPC)
PRESERVE_NONE bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isa(CodeGen::Address addr)
static bool classof(const OMPClause *T)
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_VectorDeleting
Vector deleting dtor.
@ Dtor_Complete
Complete object dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
U cast(CodeGen::Address addr)
@ Struct
The "struct" keyword introduces the elaborated-type-specifier.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
const Expr * WritebackExpr
An Expression (optional) that performs the writeback with any required casting.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::PointerType * VoidPtrTy
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
~DisableDebugLocationUpdates()
DisableDebugLocationUpdates(CodeGenFunction &CGF)
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.