34#include "llvm/ADT/STLExtras.h"
35#include "llvm/ADT/StringExtras.h"
36#include "llvm/Analysis/ValueTracking.h"
37#include "llvm/IR/Assumptions.h"
38#include "llvm/IR/AttributeMask.h"
39#include "llvm/IR/Attributes.h"
40#include "llvm/IR/CallingConv.h"
41#include "llvm/IR/DataLayout.h"
42#include "llvm/IR/DebugInfoMetadata.h"
43#include "llvm/IR/InlineAsm.h"
44#include "llvm/IR/IntrinsicInst.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/Type.h"
47#include "llvm/Transforms/Utils/Local.h"
57 return llvm::CallingConv::C;
59 return llvm::CallingConv::X86_StdCall;
61 return llvm::CallingConv::X86_FastCall;
63 return llvm::CallingConv::X86_RegCall;
65 return llvm::CallingConv::X86_ThisCall;
67 return llvm::CallingConv::Win64;
69 return llvm::CallingConv::X86_64_SysV;
71 return llvm::CallingConv::ARM_AAPCS;
73 return llvm::CallingConv::ARM_AAPCS_VFP;
75 return llvm::CallingConv::Intel_OCL_BI;
78 return llvm::CallingConv::C;
81 return llvm::CallingConv::X86_VectorCall;
83 return llvm::CallingConv::AArch64_VectorCall;
85 return llvm::CallingConv::AArch64_SVE_VectorCall;
87 return llvm::CallingConv::SPIR_FUNC;
89 return CGM.getTargetCodeGenInfo().getDeviceKernelCallingConv();
91 return llvm::CallingConv::PreserveMost;
93 return llvm::CallingConv::PreserveAll;
95 return llvm::CallingConv::Swift;
97 return llvm::CallingConv::SwiftTail;
99 return llvm::CallingConv::M68k_RTD;
101 return llvm::CallingConv::PreserveNone;
105#define CC_VLS_CASE(ABI_VLEN) \
106 case CC_RISCVVLSCall_##ABI_VLEN: \
107 return llvm::CallingConv::RISCV_VLSCall_##ABI_VLEN;
132 RecTy = Context.getCanonicalTagType(RD);
134 RecTy = Context.VoidTy;
139 return Context.getPointerType(RecTy);
172 assert(paramInfos.size() <= prefixArgs);
173 assert(proto->
getNumParams() + prefixArgs <= totalArgs);
175 paramInfos.reserve(totalArgs);
178 paramInfos.resize(prefixArgs);
182 paramInfos.push_back(ParamInfo);
184 if (ParamInfo.hasPassObjectSize())
185 paramInfos.emplace_back();
188 assert(paramInfos.size() <= totalArgs &&
189 "Did we forget to insert pass_object_size args?");
191 paramInfos.resize(totalArgs);
201 if (!FPT->hasExtParameterInfos()) {
202 assert(paramInfos.empty() &&
203 "We have paramInfos, but the prototype doesn't?");
204 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
208 unsigned PrefixSize = prefix.size();
212 prefix.reserve(prefix.size() + FPT->getNumParams());
214 auto ExtInfos = FPT->getExtParameterInfos();
215 assert(ExtInfos.size() == FPT->getNumParams());
216 for (
unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
217 prefix.push_back(FPT->getParamType(I));
218 if (ExtInfos[I].hasPassObjectSize())
243 FTP->getExtInfo(), paramInfos,
Required);
253 return ::arrangeLLVMFunctionInfo(*
this,
false, argTypes,
258 bool IsTargetDefaultMSABI) {
263 if (D->
hasAttr<FastCallAttr>())
269 if (D->
hasAttr<ThisCallAttr>())
272 if (D->
hasAttr<VectorCallAttr>())
278 if (PcsAttr *PCS = D->
getAttr<PcsAttr>())
281 if (D->
hasAttr<AArch64VectorPcsAttr>())
284 if (D->
hasAttr<AArch64SVEPcsAttr>())
287 if (D->
hasAttr<DeviceKernelAttr>())
290 if (D->
hasAttr<IntelOclBiccAttr>())
299 if (D->
hasAttr<PreserveMostAttr>())
302 if (D->
hasAttr<PreserveAllAttr>())
308 if (D->
hasAttr<PreserveNoneAttr>())
311 if (D->
hasAttr<RISCVVectorCCAttr>())
314 if (RISCVVLSCCAttr *PCS = D->
getAttr<RISCVVLSCCAttr>()) {
315 switch (PCS->getVectorWidth()) {
317 llvm_unreachable(
"Invalid RISC-V VLS ABI VLEN");
318#define CC_VLS_CASE(ABI_VLEN) \
320 return CC_RISCVVLSCall_##ABI_VLEN;
355 return ::arrangeLLVMFunctionInfo(
356 *
this,
true, argTypes,
363 if (FD->
hasAttr<CUDAGlobalAttr>()) {
399 !Target.getCXXABI().hasConstructorVariants();
412 bool PassParams =
true;
414 if (
auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
417 if (
auto Inherited = CD->getInheritedConstructor())
429 if (!paramInfos.empty()) {
432 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.
Prefix,
435 paramInfos.append(AddedArgs.
Suffix,
440 (PassParams && MD->isVariadic() ?
RequiredArgs(argTypes.size())
446 ? CGM.getContext().VoidPtrTy
449 argTypes, extInfo, paramInfos, required);
455 for (
auto &arg : args)
463 for (
auto &arg : args)
470 unsigned totalArgs) {
488 unsigned ExtraPrefixArgs,
unsigned ExtraSuffixArgs,
bool PassProtoArgs) {
490 for (
const auto &Arg : args)
491 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
494 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
499 FPT, TotalPrefixArgs + ExtraSuffixArgs)
505 ? CGM.getContext().VoidPtrTy
512 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
519 ArgTypes, Info, ParamInfos,
Required);
528 if (MD->isImplicitObjectMemberFunction())
536 if (DeviceKernelAttr::isOpenCLSpelling(FD->
getAttr<DeviceKernelAttr>()) &&
539 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
547 {}, noProto->getExtInfo(), {},
574 argTys.push_back(Context.getCanonicalParamType(receiverType));
576 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
578 argTys.push_back(Context.getCanonicalParamType(I->getType()));
580 I->hasAttr<NoEscapeAttr>());
581 extParamInfos.push_back(extParamInfo);
585 bool IsTargetDefaultMSABI =
591 if (
getContext().getLangOpts().ObjCAutoRefCount &&
592 MD->
hasAttr<NSReturnsRetainedAttr>())
629 assert(MD->
isVirtual() &&
"only methods have thunks");
646 ArgTys.push_back(*FTP->param_type_begin());
648 ArgTys.push_back(Context.IntTy);
649 CallingConv CC = Context.getDefaultCallingConvention(
661 unsigned numExtraRequiredArgs,
bool chainCall) {
662 assert(args.size() >= numExtraRequiredArgs);
672 if (proto->isVariadic())
675 if (proto->hasExtParameterInfos())
689 for (
const auto &arg : args)
694 paramInfos, required);
704 chainCall ? 1 : 0, chainCall);
733 for (
const auto &Arg : args)
734 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
774 assert(numPrefixArgs + 1 <= args.size() &&
775 "Emitting a call with less args than the required prefix?");
786 paramInfos, required);
797 assert(signature.
arg_size() <= args.size());
798 if (signature.
arg_size() == args.size())
803 if (!sigParamInfos.empty()) {
804 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
805 paramInfos.resize(args.size());
837 assert(llvm::all_of(argTypes,
841 llvm::FoldingSetNodeID ID;
846 bool isDelegateCall =
849 info, paramInfos, required, resultType, argTypes);
851 void *insertPos =
nullptr;
852 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
860 info, paramInfos, resultType, argTypes, required);
861 FunctionInfos.InsertNode(FI, insertPos);
863 bool inserted = FunctionsBeingProcessed.insert(FI).second;
865 assert(inserted &&
"Recursively being processed?");
868 if (CC == llvm::CallingConv::SPIR_KERNEL) {
875 CGM.getABIInfo().computeInfo(*FI);
886 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() ==
nullptr)
889 bool erased = FunctionsBeingProcessed.erase(FI);
891 assert(erased &&
"Not in set?");
897 bool chainCall,
bool delegateCall,
903 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
907 void *buffer =
operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
908 argTypes.size() + 1, paramInfos.size()));
910 CGFunctionInfo *FI =
new (buffer) CGFunctionInfo();
911 FI->CallingConvention = llvmCC;
912 FI->EffectiveCallingConvention = llvmCC;
913 FI->ASTCallingConvention = info.
getCC();
914 FI->InstanceMethod = instanceMethod;
915 FI->ChainCall = chainCall;
916 FI->DelegateCall = delegateCall;
922 FI->Required = required;
925 FI->ArgStruct =
nullptr;
926 FI->ArgStructAlign = 0;
927 FI->NumArgs = argTypes.size();
928 FI->HasExtParameterInfos = !paramInfos.empty();
929 FI->getArgsBuffer()[0].
type = resultType;
930 FI->MaxVectorWidth = 0;
931 for (
unsigned i = 0, e = argTypes.size(); i != e; ++i)
932 FI->getArgsBuffer()[i + 1].
type = argTypes[i];
933 for (
unsigned i = 0, e = paramInfos.size(); i != e; ++i)
934 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
944struct TypeExpansion {
945 enum TypeExpansionKind {
957 const TypeExpansionKind Kind;
959 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
960 virtual ~TypeExpansion() {}
963struct ConstantArrayExpansion : TypeExpansion {
967 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
968 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
969 static bool classof(
const TypeExpansion *TE) {
970 return TE->Kind == TEK_ConstantArray;
974struct RecordExpansion : TypeExpansion {
975 SmallVector<const CXXBaseSpecifier *, 1> Bases;
977 SmallVector<const FieldDecl *, 1> Fields;
979 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
980 SmallVector<const FieldDecl *, 1> &&Fields)
981 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
982 Fields(std::move(Fields)) {}
983 static bool classof(
const TypeExpansion *TE) {
984 return TE->Kind == TEK_Record;
988struct ComplexExpansion : TypeExpansion {
991 ComplexExpansion(QualType EltTy) : TypeExpansion(
TEK_Complex), EltTy(EltTy) {}
992 static bool classof(
const TypeExpansion *TE) {
997struct NoExpansion : TypeExpansion {
998 NoExpansion() : TypeExpansion(TEK_None) {}
999 static bool classof(
const TypeExpansion *TE) {
return TE->Kind == TEK_None; }
1003static std::unique_ptr<TypeExpansion>
1006 return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
1012 assert(!RD->hasFlexibleArrayMember() &&
1013 "Cannot expand structure with flexible array.");
1014 if (RD->isUnion()) {
1020 for (
const auto *FD : RD->fields()) {
1021 if (FD->isZeroLengthBitField())
1023 assert(!FD->isBitField() &&
1024 "Cannot expand structure with bit-field members.");
1025 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
1026 if (UnionSize < FieldSize) {
1027 UnionSize = FieldSize;
1032 Fields.push_back(LargestFD);
1034 if (
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1035 assert(!CXXRD->isDynamicClass() &&
1036 "cannot expand vtable pointers in dynamic classes");
1037 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
1040 for (
const auto *FD : RD->fields()) {
1041 if (FD->isZeroLengthBitField())
1043 assert(!FD->isBitField() &&
1044 "Cannot expand structure with bit-field members.");
1045 Fields.push_back(FD);
1048 return std::make_unique<RecordExpansion>(std::move(Bases),
1052 return std::make_unique<ComplexExpansion>(CT->getElementType());
1054 return std::make_unique<NoExpansion>();
1059 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1062 if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1064 for (
auto BS : RExp->Bases)
1066 for (
auto FD : RExp->Fields)
1079 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1080 for (
int i = 0, n = CAExp->NumElts; i < n; i++) {
1083 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1084 for (
auto BS : RExp->Bases)
1086 for (
auto FD : RExp->Fields)
1088 }
else if (
auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1099 ConstantArrayExpansion *CAE,
1101 llvm::function_ref<
void(
Address)> Fn) {
1102 for (
int i = 0, n = CAE->NumElts; i < n; i++) {
1108void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1109 llvm::Function::arg_iterator &AI) {
1110 assert(LV.isSimple() &&
1111 "Unexpected non-simple lvalue during struct expansion.");
1114 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1116 *
this, CAExp, LV.getAddress(), [&](Address EltAddr) {
1117 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1118 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1120 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1121 Address
This = LV.getAddress();
1122 for (
const CXXBaseSpecifier *BS : RExp->Bases) {
1126 false, SourceLocation());
1127 LValue SubLV = MakeAddrLValue(Base, BS->
getType());
1130 ExpandTypeFromArgs(BS->
getType(), SubLV, AI);
1132 for (
auto FD : RExp->Fields) {
1134 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1135 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1138 auto realValue = &*AI++;
1139 auto imagValue = &*AI++;
1140 EmitStoreOfComplex(
ComplexPairTy(realValue, imagValue), LV,
true);
1145 llvm::Value *Arg = &*AI++;
1146 if (LV.isBitField()) {
1152 if (Arg->getType()->isPointerTy()) {
1153 Address
Addr = LV.getAddress();
1154 Arg = Builder.CreateBitCast(Arg,
Addr.getElementType());
1156 EmitStoreOfScalar(Arg, LV);
1161void CodeGenFunction::ExpandTypeToArgs(
1162 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1163 SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos) {
1165 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1170 CallArg(convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1172 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1175 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1178 for (
const CXXBaseSpecifier *BS : RExp->Bases) {
1182 false, SourceLocation());
1186 ExpandTypeToArgs(BS->
getType(), BaseArg, IRFuncTy, IRCallArgs,
1190 LValue LV = MakeAddrLValue(This, Ty);
1191 for (
auto FD : RExp->Fields) {
1193 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1194 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1199 IRCallArgs[IRCallArgPos++] = CV.first;
1200 IRCallArgs[IRCallArgPos++] = CV.second;
1204 assert(RV.isScalar() &&
1205 "Unexpected non-scalar rvalue during struct expansion.");
1208 llvm::Value *
V = RV.getScalarVal();
1209 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1210 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1211 V = Builder.CreateBitCast(
V, IRFuncTy->getParamType(IRCallArgPos));
1213 IRCallArgs[IRCallArgPos++] =
V;
1221 const Twine &Name =
"tmp") {
1234 llvm::StructType *SrcSTy,
1238 if (SrcSTy->getNumElements() == 0)
1247 uint64_t FirstEltSize = CGF.
CGM.
getDataLayout().getTypeStoreSize(FirstElt);
1248 if (FirstEltSize < DstSize &&
1257 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1272 if (Val->getType() == Ty)
1278 return CGF.
Builder.CreateBitCast(Val, Ty,
"coerce.val");
1284 llvm::Type *DestIntTy = Ty;
1288 if (Val->getType() != DestIntTy) {
1290 if (DL.isBigEndian()) {
1293 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1294 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1296 if (SrcSize > DstSize) {
1297 Val = CGF.
Builder.CreateLShr(Val, SrcSize - DstSize,
"coerce.highbits");
1298 Val = CGF.
Builder.CreateTrunc(Val, DestIntTy,
"coerce.val.ii");
1300 Val = CGF.
Builder.CreateZExt(Val, DestIntTy,
"coerce.val.ii");
1301 Val = CGF.
Builder.CreateShl(Val, DstSize - SrcSize,
"coerce.highbits");
1305 Val = CGF.
Builder.CreateIntCast(Val, DestIntTy,
false,
"coerce.val.ii");
1310 Val = CGF.
Builder.CreateIntToPtr(Val, Ty,
"coerce.val.ip");
1331 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1333 DstSize.getFixedValue(), CGF);
1348 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1349 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1363 if (
auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1364 if (
auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1367 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
1368 FixedSrcTy->getElementType()->isIntegerTy(8)) {
1369 ScalableDstTy = llvm::ScalableVectorType::get(
1370 FixedSrcTy->getElementType(),
1372 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
1374 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
1376 auto *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
1377 llvm::Value *Result = CGF.
Builder.CreateInsertVector(
1378 ScalableDstTy, PoisonVec, Load, uint64_t(0),
"cast.scalable");
1380 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, Ty));
1381 if (Result->getType() != ScalableDstTy)
1382 Result = CGF.
Builder.CreateBitCast(Result, ScalableDstTy);
1383 if (Result->getType() != Ty)
1384 Result = CGF.
Builder.CreateExtractVector(Ty, Result, uint64_t(0));
1396 llvm::ConstantInt::get(CGF.
IntPtrTy, SrcSize.getKnownMinValue()));
1401 llvm::TypeSize DstSize,
1402 bool DstIsVolatile) {
1406 llvm::Type *SrcTy = Src->getType();
1407 llvm::TypeSize SrcSize =
CGM.getDataLayout().getTypeAllocSize(SrcTy);
1413 if (llvm::StructType *DstSTy =
1415 assert(!SrcSize.isScalable());
1417 SrcSize.getFixedValue(), *
this);
1421 if (SrcSize.isScalable() || SrcSize <= DstSize) {
1422 if (SrcTy->isIntegerTy() && Dst.
getElementType()->isPointerTy() &&
1426 auto *I =
Builder.CreateStore(Src, Dst, DstIsVolatile);
1428 }
else if (llvm::StructType *STy =
1429 dyn_cast<llvm::StructType>(Src->getType())) {
1432 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1434 llvm::Value *Elt =
Builder.CreateExtractValue(Src, i);
1435 auto *I =
Builder.CreateStore(Elt, EltPtr, DstIsVolatile);
1443 }
else if (SrcTy->isIntegerTy()) {
1445 llvm::Type *DstIntTy =
Builder.getIntNTy(DstSize.getFixedValue() * 8);
1462 Builder.CreateStore(Src, Tmp);
1463 auto *I =
Builder.CreateMemCpy(
1482static std::pair<llvm::Value *, bool>
1484 llvm::ScalableVectorType *FromTy, llvm::Value *
V,
1485 StringRef Name =
"") {
1488 if (FromTy->getElementType()->isIntegerTy(1) &&
1489 ToTy->getElementType() == CGF.
Builder.getInt8Ty()) {
1490 if (!FromTy->getElementCount().isKnownMultipleOf(8)) {
1491 FromTy = llvm::ScalableVectorType::get(
1492 FromTy->getElementType(),
1493 llvm::alignTo<8>(FromTy->getElementCount().getKnownMinValue()));
1494 llvm::Value *ZeroVec = llvm::Constant::getNullValue(FromTy);
1495 V = CGF.
Builder.CreateInsertVector(FromTy, ZeroVec,
V, uint64_t(0));
1497 FromTy = llvm::ScalableVectorType::get(
1498 ToTy->getElementType(),
1499 FromTy->getElementCount().getKnownMinValue() / 8);
1500 V = CGF.
Builder.CreateBitCast(
V, FromTy);
1502 if (FromTy->getElementType() == ToTy->getElementType()) {
1503 V->setName(Name +
".coerce");
1504 V = CGF.
Builder.CreateExtractVector(ToTy,
V, uint64_t(0),
"cast.fixed");
1514class ClangToLLVMArgMapping {
1515 static const unsigned InvalidIndex = ~0U;
1516 unsigned InallocaArgNo;
1518 unsigned TotalIRArgs;
1522 unsigned PaddingArgIndex;
1525 unsigned FirstArgIndex;
1526 unsigned NumberOfArgs;
1529 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1533 SmallVector<IRArgs, 8> ArgInfo;
1536 ClangToLLVMArgMapping(
const ASTContext &Context,
const CGFunctionInfo &FI,
1537 bool OnlyRequiredArgs =
false)
1538 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1539 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1540 construct(Context, FI, OnlyRequiredArgs);
1543 bool hasInallocaArg()
const {
return InallocaArgNo != InvalidIndex; }
1544 unsigned getInallocaArgNo()
const {
1545 assert(hasInallocaArg());
1546 return InallocaArgNo;
1549 bool hasSRetArg()
const {
return SRetArgNo != InvalidIndex; }
1550 unsigned getSRetArgNo()
const {
1551 assert(hasSRetArg());
1555 unsigned totalIRArgs()
const {
return TotalIRArgs; }
1557 bool hasPaddingArg(
unsigned ArgNo)
const {
1558 assert(ArgNo < ArgInfo.size());
1559 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1561 unsigned getPaddingArgNo(
unsigned ArgNo)
const {
1562 assert(hasPaddingArg(ArgNo));
1563 return ArgInfo[ArgNo].PaddingArgIndex;
1568 std::pair<unsigned, unsigned> getIRArgs(
unsigned ArgNo)
const {
1569 assert(ArgNo < ArgInfo.size());
1570 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1571 ArgInfo[ArgNo].NumberOfArgs);
1575 void construct(
const ASTContext &Context,
const CGFunctionInfo &FI,
1576 bool OnlyRequiredArgs);
1579void ClangToLLVMArgMapping::construct(
const ASTContext &Context,
1580 const CGFunctionInfo &FI,
1581 bool OnlyRequiredArgs) {
1582 unsigned IRArgNo = 0;
1583 bool SwapThisWithSRet =
false;
1588 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1596 QualType ArgType = I->type;
1597 const ABIArgInfo &AI = I->info;
1599 auto &IRArgs = ArgInfo[ArgNo];
1602 IRArgs.PaddingArgIndex = IRArgNo++;
1609 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.
getCoerceToType());
1611 IRArgs.NumberOfArgs = STy->getNumElements();
1613 IRArgs.NumberOfArgs = 1;
1619 IRArgs.NumberOfArgs = 1;
1624 IRArgs.NumberOfArgs = 0;
1634 if (IRArgs.NumberOfArgs > 0) {
1635 IRArgs.FirstArgIndex = IRArgNo;
1636 IRArgNo += IRArgs.NumberOfArgs;
1641 if (IRArgNo == 1 && SwapThisWithSRet)
1644 assert(ArgNo == ArgInfo.size());
1647 InallocaArgNo = IRArgNo++;
1649 TotalIRArgs = IRArgNo;
1657 return RI.
isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1672 switch (BT->getKind()) {
1675 case BuiltinType::Float:
1677 case BuiltinType::Double:
1679 case BuiltinType::LongDouble:
1690 if (BT->getKind() == BuiltinType::LongDouble)
1691 return getTarget().useObjCFP2RetForComplexLongDouble();
1705 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1707 assert(Inserted &&
"Recursively being processed?");
1709 llvm::Type *resultType =
nullptr;
1714 llvm_unreachable(
"Invalid ABI kind for return argument");
1726 unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
1727 resultType = llvm::PointerType::get(
getLLVMContext(), addressSpace);
1743 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI,
true);
1747 if (IRFunctionArgs.hasSRetArg()) {
1748 ArgTypes[IRFunctionArgs.getSRetArgNo()] = llvm::PointerType::get(
1753 if (IRFunctionArgs.hasInallocaArg())
1754 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1761 for (; it != ie; ++it, ++ArgNo) {
1765 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1766 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1769 unsigned FirstIRArg, NumIRArgs;
1770 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1775 assert(NumIRArgs == 0);
1779 assert(NumIRArgs == 1);
1781 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1785 assert(NumIRArgs == 1);
1786 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1795 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1797 assert(NumIRArgs == st->getNumElements());
1798 for (
unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1799 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1801 assert(NumIRArgs == 1);
1802 ArgTypes[FirstIRArg] = argType;
1808 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1810 *ArgTypesIter++ = EltTy;
1812 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1817 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1819 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1824 bool Erased = FunctionsBeingProcessed.erase(&FI);
1826 assert(Erased &&
"Not in set?");
1828 return llvm::FunctionType::get(resultType, ArgTypes, FI.
isVariadic());
1842 llvm::AttrBuilder &FuncAttrs,
1849 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1853 FuncAttrs.addAttribute(
"aarch64_pstate_sm_enabled");
1855 FuncAttrs.addAttribute(
"aarch64_pstate_sm_compatible");
1857 FuncAttrs.addAttribute(
"aarch64_za_state_agnostic");
1861 FuncAttrs.addAttribute(
"aarch64_preserves_za");
1863 FuncAttrs.addAttribute(
"aarch64_in_za");
1865 FuncAttrs.addAttribute(
"aarch64_out_za");
1867 FuncAttrs.addAttribute(
"aarch64_inout_za");
1871 FuncAttrs.addAttribute(
"aarch64_preserves_zt0");
1873 FuncAttrs.addAttribute(
"aarch64_in_zt0");
1875 FuncAttrs.addAttribute(
"aarch64_out_zt0");
1877 FuncAttrs.addAttribute(
"aarch64_inout_zt0");
1881 const Decl *Callee) {
1887 for (
const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
1888 AA->getAssumption().split(Attrs,
",");
1891 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1892 llvm::join(Attrs.begin(), Attrs.end(),
","));
1899 if (
const RecordType *RT =
1901 if (
const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1902 return ClassDecl->hasTrivialDestructor();
1908 const Decl *TargetDecl) {
1914 if (
Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
1918 if (!
Module.getLangOpts().CPlusPlus)
1921 if (
const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
1922 if (FDecl->isExternC())
1924 }
else if (
const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
1926 if (VDecl->isExternC())
1934 return Module.getCodeGenOpts().StrictReturn ||
1935 !
Module.MayDropFunctionReturn(
Module.getContext(), RetTy) ||
1936 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
1943 llvm::DenormalMode FP32DenormalMode,
1944 llvm::AttrBuilder &FuncAttrs) {
1945 if (FPDenormalMode != llvm::DenormalMode::getDefault())
1946 FuncAttrs.addAttribute(
"denormal-fp-math", FPDenormalMode.str());
1948 if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
1949 FuncAttrs.addAttribute(
"denormal-fp-math-f32", FP32DenormalMode.str());
1957 llvm::AttrBuilder &FuncAttrs) {
1963 StringRef Name,
bool HasOptnone,
const CodeGenOptions &CodeGenOpts,
1965 llvm::AttrBuilder &FuncAttrs) {
1968 if (CodeGenOpts.OptimizeSize)
1969 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1970 if (CodeGenOpts.OptimizeSize == 2)
1971 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1974 if (CodeGenOpts.DisableRedZone)
1975 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1976 if (CodeGenOpts.IndirectTlsSegRefs)
1977 FuncAttrs.addAttribute(
"indirect-tls-seg-refs");
1978 if (CodeGenOpts.NoImplicitFloat)
1979 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1981 if (AttrOnCallSite) {
1986 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1988 FuncAttrs.addAttribute(
"trap-func-name", CodeGenOpts.
TrapFuncName);
1990 switch (CodeGenOpts.getFramePointer()) {
1998 FuncAttrs.addAttribute(
"frame-pointer",
2000 CodeGenOpts.getFramePointer()));
2003 if (CodeGenOpts.LessPreciseFPMAD)
2004 FuncAttrs.addAttribute(
"less-precise-fpmad",
"true");
2006 if (CodeGenOpts.NullPointerIsValid)
2007 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
2010 FuncAttrs.addAttribute(
"no-trapping-math",
"true");
2014 if (LangOpts.NoHonorInfs)
2015 FuncAttrs.addAttribute(
"no-infs-fp-math",
"true");
2016 if (LangOpts.NoHonorNaNs)
2017 FuncAttrs.addAttribute(
"no-nans-fp-math",
"true");
2018 if (CodeGenOpts.SoftFloat)
2019 FuncAttrs.addAttribute(
"use-soft-float",
"true");
2020 FuncAttrs.addAttribute(
"stack-protector-buffer-size",
2021 llvm::utostr(CodeGenOpts.SSPBufferSize));
2022 if (LangOpts.NoSignedZero)
2023 FuncAttrs.addAttribute(
"no-signed-zeros-fp-math",
"true");
2026 const std::vector<std::string> &Recips = CodeGenOpts.
Reciprocals;
2027 if (!Recips.empty())
2028 FuncAttrs.addAttribute(
"reciprocal-estimates", llvm::join(Recips,
","));
2032 FuncAttrs.addAttribute(
"prefer-vector-width",
2035 if (CodeGenOpts.StackRealignment)
2036 FuncAttrs.addAttribute(
"stackrealign");
2037 if (CodeGenOpts.Backchain)
2038 FuncAttrs.addAttribute(
"backchain");
2039 if (CodeGenOpts.EnableSegmentedStacks)
2040 FuncAttrs.addAttribute(
"split-stack");
2042 if (CodeGenOpts.SpeculativeLoadHardening)
2043 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2046 switch (CodeGenOpts.getZeroCallUsedRegs()) {
2047 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
2048 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2050 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
2051 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr-arg");
2053 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
2054 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr");
2056 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
2057 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-arg");
2059 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
2060 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used");
2062 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
2063 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr-arg");
2065 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
2066 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr");
2068 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
2069 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-arg");
2071 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
2072 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all");
2083 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2088 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
2089 LangOpts.SYCLIsDevice) {
2090 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2093 if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
2094 FuncAttrs.addAttribute(
"save-reg-params");
2097 StringRef Var,
Value;
2099 FuncAttrs.addAttribute(Var,
Value);
2113 const llvm::Function &F,
2115 auto FFeatures = F.getFnAttribute(
"target-features");
2117 llvm::StringSet<> MergedNames;
2119 MergedFeatures.reserve(TargetOpts.
Features.size());
2121 auto AddUnmergedFeatures = [&](
auto &&FeatureRange) {
2122 for (StringRef
Feature : FeatureRange) {
2126 StringRef Name =
Feature.drop_front(1);
2127 bool Merged = !MergedNames.insert(Name).second;
2129 MergedFeatures.push_back(
Feature);
2133 if (FFeatures.isValid())
2134 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(),
','));
2135 AddUnmergedFeatures(TargetOpts.
Features);
2137 if (!MergedFeatures.empty()) {
2138 llvm::sort(MergedFeatures);
2139 FuncAttr.addAttribute(
"target-features", llvm::join(MergedFeatures,
","));
2146 bool WillInternalize) {
2148 llvm::AttrBuilder FuncAttrs(F.getContext());
2151 if (!TargetOpts.
CPU.empty())
2152 FuncAttrs.addAttribute(
"target-cpu", TargetOpts.
CPU);
2153 if (!TargetOpts.
TuneCPU.empty())
2154 FuncAttrs.addAttribute(
"tune-cpu", TargetOpts.
TuneCPU);
2157 CodeGenOpts, LangOpts,
2160 if (!WillInternalize && F.isInterposable()) {
2165 F.addFnAttrs(FuncAttrs);
2169 llvm::AttributeMask AttrsToRemove;
2171 llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
2172 llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
2173 llvm::DenormalMode Merged =
2177 if (DenormModeToMergeF32.isValid()) {
2182 if (Merged == llvm::DenormalMode::getDefault()) {
2183 AttrsToRemove.addAttribute(
"denormal-fp-math");
2184 }
else if (Merged != DenormModeToMerge) {
2186 FuncAttrs.addAttribute(
"denormal-fp-math",
2190 if (MergedF32 == llvm::DenormalMode::getDefault()) {
2191 AttrsToRemove.addAttribute(
"denormal-fp-math-f32");
2192 }
else if (MergedF32 != DenormModeToMergeF32) {
2194 FuncAttrs.addAttribute(
"denormal-fp-math-f32",
2198 F.removeFnAttrs(AttrsToRemove);
2203 F.addFnAttrs(FuncAttrs);
2206void CodeGenModule::getTrivialDefaultFunctionAttributes(
2207 StringRef Name,
bool HasOptnone,
bool AttrOnCallSite,
2208 llvm::AttrBuilder &FuncAttrs) {
2210 getLangOpts(), AttrOnCallSite,
2214void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2216 bool AttrOnCallSite,
2217 llvm::AttrBuilder &FuncAttrs) {
2221 if (!AttrOnCallSite)
2227 if (!AttrOnCallSite)
2232 llvm::AttrBuilder &attrs) {
2233 getDefaultFunctionAttributes(
"",
false,
2235 GetCPUAndFeaturesAttributes(
GlobalDecl(), attrs);
2240 const NoBuiltinAttr *NBA =
nullptr) {
2241 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2243 AttributeName +=
"no-builtin-";
2244 AttributeName += BuiltinName;
2245 FuncAttrs.addAttribute(AttributeName);
2249 if (LangOpts.NoBuiltin) {
2251 FuncAttrs.addAttribute(
"no-builtins");
2265 if (llvm::is_contained(NBA->builtinNames(),
"*")) {
2266 FuncAttrs.addAttribute(
"no-builtins");
2271 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2275 const llvm::DataLayout &DL,
const ABIArgInfo &AI,
2276 bool CheckCoerce =
true) {
2283 if (!DL.typeSizeEqualsStoreSize(Ty))
2290 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2291 DL.getTypeSizeInBits(Ty)))
2315 if (
const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2317 if (
const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2326 unsigned NumRequiredArgs,
unsigned ArgNo) {
2327 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2332 if (ArgNo >= NumRequiredArgs)
2336 if (ArgNo < FD->getNumParams()) {
2337 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2338 if (Param && Param->hasAttr<MaybeUndefAttr>())
2355 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2358 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2360 llvm::all_of(ST->elements(),
2361 llvm::AttributeFuncs::isNoFPClassCompatibleType);
2369 llvm::FPClassTest Mask = llvm::fcNone;
2370 if (LangOpts.NoHonorInfs)
2371 Mask |= llvm::fcInf;
2372 if (LangOpts.NoHonorNaNs)
2373 Mask |= llvm::fcNan;
2379 llvm::AttributeList &Attrs) {
2380 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2381 Attrs = Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Memory);
2382 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
2408 llvm::AttributeList &AttrList,
2410 bool AttrOnCallSite,
bool IsThunk) {
2418 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2420 FuncAttrs.addAttribute(
"cmse_nonsecure_call");
2431 bool HasOptnone =
false;
2433 const NoBuiltinAttr *NBA =
nullptr;
2437 std::optional<llvm::Attribute::AttrKind> MemAttrForPtrArgs;
2438 bool AddedPotentialArgAccess =
false;
2439 auto AddPotentialArgAccess = [&]() {
2440 AddedPotentialArgAccess =
true;
2441 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2443 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2444 llvm::MemoryEffects::argMemOnly());
2451 if (TargetDecl->
hasAttr<ReturnsTwiceAttr>())
2452 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2453 if (TargetDecl->
hasAttr<NoThrowAttr>())
2454 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2455 if (TargetDecl->
hasAttr<NoReturnAttr>())
2456 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2457 if (TargetDecl->
hasAttr<ColdAttr>())
2458 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2459 if (TargetDecl->
hasAttr<HotAttr>())
2460 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2461 if (TargetDecl->
hasAttr<NoDuplicateAttr>())
2462 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2463 if (TargetDecl->
hasAttr<ConvergentAttr>())
2464 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2466 if (
const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2469 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2471 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2473 (Kind == OO_New || Kind == OO_Array_New))
2474 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2477 const bool IsVirtualCall = MD && MD->
isVirtual();
2480 if (!(AttrOnCallSite && IsVirtualCall)) {
2481 if (Fn->isNoReturn())
2482 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2483 NBA = Fn->getAttr<NoBuiltinAttr>();
2490 if (AttrOnCallSite && TargetDecl->
hasAttr<NoMergeAttr>())
2491 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2495 if (TargetDecl->
hasAttr<ConstAttr>()) {
2496 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2497 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2500 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2501 MemAttrForPtrArgs = llvm::Attribute::ReadNone;
2502 }
else if (TargetDecl->
hasAttr<PureAttr>()) {
2503 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2504 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2506 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2507 MemAttrForPtrArgs = llvm::Attribute::ReadOnly;
2508 }
else if (TargetDecl->
hasAttr<NoAliasAttr>()) {
2509 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2510 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2512 if (
const auto *RA = TargetDecl->
getAttr<RestrictAttr>();
2513 RA && RA->getDeallocator() ==
nullptr)
2514 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2515 if (TargetDecl->
hasAttr<ReturnsNonNullAttr>() &&
2516 !CodeGenOpts.NullPointerIsValid)
2517 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2518 if (TargetDecl->
hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2519 FuncAttrs.addAttribute(
"no_caller_saved_registers");
2520 if (TargetDecl->
hasAttr<AnyX86NoCfCheckAttr>())
2521 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2522 if (TargetDecl->
hasAttr<LeafAttr>())
2523 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2524 if (TargetDecl->
hasAttr<BPFFastCallAttr>())
2525 FuncAttrs.addAttribute(
"bpf_fastcall");
2527 HasOptnone = TargetDecl->
hasAttr<OptimizeNoneAttr>();
2528 if (
auto *AllocSize = TargetDecl->
getAttr<AllocSizeAttr>()) {
2529 std::optional<unsigned> NumElemsParam;
2530 if (AllocSize->getNumElemsParam().isValid())
2531 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2532 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2536 if (DeviceKernelAttr::isOpenCLSpelling(
2537 TargetDecl->
getAttr<DeviceKernelAttr>()) &&
2544 FuncAttrs.addAttribute(
"uniform-work-group-size",
"true");
2551 FuncAttrs.addAttribute(
2552 "uniform-work-group-size",
2553 llvm::toStringRef(
getLangOpts().OffloadUniformBlock));
2557 if (TargetDecl->
hasAttr<CUDAGlobalAttr>() &&
2559 FuncAttrs.addAttribute(
"uniform-work-group-size",
"true");
2561 if (TargetDecl->
hasAttr<ArmLocallyStreamingAttr>())
2562 FuncAttrs.addAttribute(
"aarch64_pstate_sm_body");
2564 if (
auto *ModularFormat = TargetDecl->
getAttr<ModularFormatAttr>()) {
2565 FormatAttr *Format = TargetDecl->
getAttr<FormatAttr>();
2566 StringRef
Type = Format->getType()->getName();
2567 std::string FormatIdx = std::to_string(Format->getFormatIdx());
2568 std::string FirstArg = std::to_string(Format->getFirstArg());
2570 Type, FormatIdx, FirstArg,
2571 ModularFormat->getModularImplFn()->getName(),
2572 ModularFormat->getImplName()};
2573 llvm::append_range(Args, ModularFormat->aspects());
2574 FuncAttrs.addAttribute(
"modular-format", llvm::join(Args,
","));
2587 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2592 if (TargetDecl->
hasAttr<NoSpeculativeLoadHardeningAttr>())
2593 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2594 if (TargetDecl->
hasAttr<SpeculativeLoadHardeningAttr>())
2595 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2596 if (TargetDecl->
hasAttr<NoSplitStackAttr>())
2597 FuncAttrs.removeAttribute(
"split-stack");
2598 if (TargetDecl->
hasAttr<ZeroCallUsedRegsAttr>()) {
2601 TargetDecl->
getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2602 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2603 FuncAttrs.addAttribute(
2604 "zero-call-used-regs",
2605 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2612 if (CodeGenOpts.NoPLT) {
2613 if (
auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2614 if (!Fn->isDefined() && !AttrOnCallSite) {
2615 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2620 if (TargetDecl->
hasAttr<NoConvergentAttr>())
2621 FuncAttrs.removeAttribute(llvm::Attribute::Convergent);
2626 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2627 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2628 if (!FD->isExternallyVisible())
2629 FuncAttrs.addAttribute(
"sample-profile-suffix-elision-policy",
2636 if (!AttrOnCallSite) {
2637 if (TargetDecl && TargetDecl->
hasAttr<CmseNSEntryAttr>())
2638 FuncAttrs.addAttribute(
"cmse_nonsecure_entry");
2641 auto shouldDisableTailCalls = [&] {
2643 if (CodeGenOpts.DisableTailCalls)
2649 if (TargetDecl->
hasAttr<DisableTailCallsAttr>() ||
2650 TargetDecl->
hasAttr<AnyX86InterruptAttr>())
2653 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2654 if (
const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2655 if (!BD->doesNotEscape())
2661 if (shouldDisableTailCalls())
2662 FuncAttrs.addAttribute(
"disable-tail-calls",
"true");
2667 static const llvm::StringSet<> ReturnsTwiceFn{
2668 "_setjmpex",
"setjmp",
"_setjmp",
"vfork",
2669 "sigsetjmp",
"__sigsetjmp",
"savectx",
"getcontext"};
2670 if (ReturnsTwiceFn.contains(Name))
2671 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2675 GetCPUAndFeaturesAttributes(CalleeInfo.
getCalleeDecl(), FuncAttrs);
2678 if (!MSHotPatchFunctions.empty()) {
2679 bool IsHotPatched = llvm::binary_search(MSHotPatchFunctions, Name);
2681 FuncAttrs.addAttribute(
"marked_for_windows_hot_patching");
2686 if (CodeGenOpts.isLoaderReplaceableFunctionName(Name))
2687 FuncAttrs.addAttribute(
"loader-replaceable");
2690 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI);
2697 if (CodeGenOpts.EnableNoundefAttrs &&
2701 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2707 RetAttrs.addAttribute(llvm::Attribute::SExt);
2709 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2711 RetAttrs.addAttribute(llvm::Attribute::NoExt);
2716 RetAttrs.addAttribute(llvm::Attribute::InReg);
2728 AddPotentialArgAccess();
2737 llvm_unreachable(
"Invalid ABI kind for return argument");
2745 RetAttrs.addDereferenceableAttr(
2747 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2748 !CodeGenOpts.NullPointerIsValid)
2749 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2751 llvm::Align Alignment =
2753 RetAttrs.addAlignmentAttr(Alignment);
2758 bool hasUsedSRet =
false;
2762 if (IRFunctionArgs.hasSRetArg()) {
2764 SRETAttrs.addStructRetAttr(
getTypes().ConvertTypeForMem(RetTy));
2765 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2766 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2769 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2771 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2776 if (IRFunctionArgs.hasInallocaArg()) {
2779 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2788 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2790 assert(IRArgs.second == 1 &&
"Expected only a single `this` pointer.");
2796 if (!CodeGenOpts.NullPointerIsValid &&
2798 Attrs.addAttribute(llvm::Attribute::NonNull);
2805 Attrs.addDereferenceableOrNullAttr(
2811 llvm::Align Alignment =
2815 Attrs.addAlignmentAttr(Alignment);
2817 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(
getLLVMContext(), Attrs);
2822 I != E; ++I, ++ArgNo) {
2828 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2830 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2833 .addAttribute(llvm::Attribute::InReg));
2838 if (CodeGenOpts.EnableNoundefAttrs &&
2840 Attrs.addAttribute(llvm::Attribute::NoUndef);
2849 Attrs.addAttribute(llvm::Attribute::SExt);
2851 Attrs.addAttribute(llvm::Attribute::ZExt);
2853 Attrs.addAttribute(llvm::Attribute::NoExt);
2858 Attrs.addAttribute(llvm::Attribute::Nest);
2860 Attrs.addAttribute(llvm::Attribute::InReg);
2861 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.
getDirectAlign()));
2868 Attrs.addAttribute(llvm::Attribute::InReg);
2880 Attrs.addByValAttr(
getTypes().ConvertTypeForMem(ParamType));
2888 Attrs.addDeadOnReturnAttr(llvm::DeadOnReturnInfo());
2893 if (CodeGenOpts.PassByValueIsNoAlias &&
Decl &&
2894 Decl->getArgPassingRestrictions() ==
2898 Attrs.addAttribute(llvm::Attribute::NoAlias);
2923 AddPotentialArgAccess();
2928 Attrs.addByRefAttr(
getTypes().ConvertTypeForMem(ParamType));
2939 AddPotentialArgAccess();
2947 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2948 !CodeGenOpts.NullPointerIsValid)
2949 Attrs.addAttribute(llvm::Attribute::NonNull);
2951 llvm::Align Alignment =
2953 Attrs.addAlignmentAttr(Alignment);
2962 DeviceKernelAttr::isOpenCLSpelling(
2963 TargetDecl->
getAttr<DeviceKernelAttr>()) &&
2967 llvm::Align Alignment =
2969 Attrs.addAlignmentAttr(Alignment);
2976 Attrs.addAttribute(llvm::Attribute::NoAlias);
2985 Attrs.addStructRetAttr(
getTypes().ConvertTypeForMem(ParamType));
2990 Attrs.addAttribute(llvm::Attribute::NoAlias);
2994 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2995 auto info =
getContext().getTypeInfoInChars(PTy);
2996 Attrs.addDereferenceableAttr(info.Width.getQuantity());
2997 Attrs.addAlignmentAttr(info.Align.getAsAlign());
3003 Attrs.addAttribute(llvm::Attribute::SwiftError);
3007 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
3011 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
3016 Attrs.addCapturesAttr(llvm::CaptureInfo::none());
3018 if (Attrs.hasAttributes()) {
3019 unsigned FirstIRArg, NumIRArgs;
3020 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3021 for (
unsigned i = 0; i < NumIRArgs; i++)
3022 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
3029 if (AddedPotentialArgAccess && MemAttrForPtrArgs) {
3033 I != E; ++I, ++ArgNo) {
3034 if (I->info.isDirect() || I->info.isExpand() ||
3035 I->info.isCoerceAndExpand()) {
3036 unsigned FirstIRArg, NumIRArgs;
3037 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3038 for (
unsigned i = FirstIRArg; i < FirstIRArg + NumIRArgs; ++i) {
3048 AttrList = llvm::AttributeList::get(
3057 llvm::Value *value) {
3058 llvm::Type *varType = CGF.
ConvertType(var->getType());
3062 if (value->getType() == varType)
3065 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) &&
3066 "unexpected promotion type");
3069 return CGF.
Builder.CreateTrunc(value, varType,
"arg.unpromote");
3071 return CGF.
Builder.CreateFPCast(value, varType,
"arg.unpromote");
3077 QualType ArgType,
unsigned ArgNo) {
3085 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
3089 if (
auto ParmNNAttr = PVD->
getAttr<NonNullAttr>())
3096 if (NNAttr->isNonNull(ArgNo))
3103struct CopyBackSwiftError final : EHScopeStack::Cleanup {
3106 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(
arg) {}
3107 void Emit(CodeGenFunction &CGF, Flags flags)
override {
3126 if (FD->hasImplicitReturnZero()) {
3127 QualType RetTy = FD->getReturnType().getUnqualifiedType();
3128 llvm::Type *LLVMTy =
CGM.getTypes().ConvertType(RetTy);
3129 llvm::Constant *
Zero = llvm::Constant::getNullValue(LLVMTy);
3137 ClangToLLVMArgMapping IRFunctionArgs(
CGM.getContext(), FI);
3138 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
3143 if (IRFunctionArgs.hasInallocaArg())
3144 ArgStruct =
Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
3148 if (IRFunctionArgs.hasSRetArg()) {
3149 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
3150 AI->setName(
"agg.result");
3151 AI->addAttr(llvm::Attribute::NoAlias);
3158 ArgVals.reserve(Args.size());
3164 assert(FI.
arg_size() == Args.size() &&
3165 "Mismatch between function signature & arguments.");
3168 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e;
3169 ++i, ++info_it, ++ArgNo) {
3182 unsigned FirstIRArg, NumIRArgs;
3183 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3187 assert(NumIRArgs == 0);
3200 assert(NumIRArgs == 1);
3223 llvm::ConstantInt::get(
IntPtrTy, Size.getQuantity()));
3224 ParamAddr = AlignedTemp;
3241 auto AI = Fn->getArg(FirstIRArg);
3249 assert(NumIRArgs == 1);
3251 if (
const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
3254 PVD->getFunctionScopeIndex()) &&
3255 !
CGM.getCodeGenOpts().NullPointerIsValid)
3256 AI->addAttr(llvm::Attribute::NonNull);
3258 QualType OTy = PVD->getOriginalType();
3259 if (
const auto *ArrTy =
getContext().getAsConstantArrayType(OTy)) {
3265 QualType ETy = ArrTy->getElementType();
3266 llvm::Align Alignment =
3267 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3269 .addAlignmentAttr(Alignment));
3270 uint64_t ArrSize = ArrTy->getZExtSize();
3274 Attrs.addDereferenceableAttr(
3275 getContext().getTypeSizeInChars(ETy).getQuantity() *
3277 AI->addAttrs(Attrs);
3278 }
else if (
getContext().getTargetInfo().getNullPointerValue(
3280 !
CGM.getCodeGenOpts().NullPointerIsValid) {
3281 AI->addAttr(llvm::Attribute::NonNull);
3284 }
else if (
const auto *ArrTy =
3290 QualType ETy = ArrTy->getElementType();
3291 llvm::Align Alignment =
3292 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3294 .addAlignmentAttr(Alignment));
3295 if (!
getTypes().getTargetAddressSpace(ETy) &&
3296 !
CGM.getCodeGenOpts().NullPointerIsValid)
3297 AI->addAttr(llvm::Attribute::NonNull);
3302 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3305 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3306 if (AVAttr && !
SanOpts.has(SanitizerKind::Alignment)) {
3310 llvm::ConstantInt *AlignmentCI =
3312 uint64_t AlignmentInt =
3313 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3314 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3315 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3317 .addAlignmentAttr(llvm::Align(AlignmentInt)));
3324 AI->addAttr(llvm::Attribute::NoAlias);
3332 assert(NumIRArgs == 1);
3336 llvm::Value *
V = AI;
3344 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
3345 llvm::Value *incomingErrorValue =
Builder.CreateLoad(arg);
3346 Builder.CreateStore(incomingErrorValue, temp);
3367 if (
V->getType() != LTy)
3378 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(
ConvertType(Ty))) {
3379 llvm::Value *ArgVal = Fn->getArg(FirstIRArg);
3380 if (
auto *VecTyFrom =
3381 dyn_cast<llvm::ScalableVectorType>(ArgVal->getType())) {
3383 *
this, VecTyTo, VecTyFrom, ArgVal, Arg->
getName());
3385 assert(NumIRArgs == 1);
3392 llvm::StructType *STy =
3403 STy->getNumElements() > 1) {
3404 llvm::TypeSize StructSize =
CGM.getDataLayout().getTypeAllocSize(STy);
3405 llvm::TypeSize PtrElementSize =
3407 if (StructSize.isScalable()) {
3408 assert(STy->containsHomogeneousScalableVectorTypes() &&
3409 "ABI only supports structure with homogeneous scalable vector "
3411 assert(StructSize == PtrElementSize &&
3412 "Only allow non-fractional movement of structure with"
3413 "homogeneous scalable vector type");
3414 assert(STy->getNumElements() == NumIRArgs);
3416 llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
3417 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3418 auto *AI = Fn->getArg(FirstIRArg + i);
3419 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3421 Builder.CreateInsertValue(LoadedStructValue, AI, i);
3424 Builder.CreateStore(LoadedStructValue, Ptr);
3426 uint64_t SrcSize = StructSize.getFixedValue();
3427 uint64_t DstSize = PtrElementSize.getFixedValue();
3430 if (SrcSize <= DstSize) {
3437 assert(STy->getNumElements() == NumIRArgs);
3438 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3439 auto AI = Fn->getArg(FirstIRArg + i);
3440 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3442 Builder.CreateStore(AI, EltPtr);
3445 if (SrcSize > DstSize) {
3446 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
3451 assert(NumIRArgs == 1);
3452 auto AI = Fn->getArg(FirstIRArg);
3453 AI->setName(Arg->
getName() +
".coerce");
3456 llvm::TypeSize::getFixed(
3457 getContext().getTypeSizeInChars(Ty).getQuantity() -
3482 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
3486 unsigned argIndex = FirstIRArg;
3487 unsigned unpaddedIndex = 0;
3488 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3489 llvm::Type *eltType = coercionType->getElementType(i);
3493 auto eltAddr =
Builder.CreateStructGEP(alloca, i);
3494 llvm::Value *elt = Fn->getArg(argIndex++);
3496 auto paramType = unpaddedStruct
3497 ? unpaddedStruct->getElementType(unpaddedIndex++)
3498 : unpaddedCoercionType;
3500 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(eltType)) {
3501 if (
auto *VecTyFrom = dyn_cast<llvm::ScalableVectorType>(paramType)) {
3504 *
this, VecTyTo, VecTyFrom, elt, elt->getName());
3505 assert(Extracted &&
"Unexpected scalable to fixed vector coercion");
3508 Builder.CreateStore(elt, eltAddr);
3510 assert(argIndex == FirstIRArg + NumIRArgs);
3522 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3523 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3524 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3525 for (
unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3526 auto AI = Fn->getArg(FirstIRArg + i);
3527 AI->setName(Arg->
getName() +
"." + Twine(i));
3533 auto *AI = Fn->getArg(FirstIRArg);
3534 AI->setName(Arg->
getName() +
".target_coerce");
3538 CGM.getABIInfo().createCoercedStore(AI, Ptr, ArgI,
false, *
this);
3552 assert(NumIRArgs == 0);
3564 if (
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3565 for (
int I = Args.size() - 1; I >= 0; --I)
3568 for (
unsigned I = 0, E = Args.size(); I != E; ++I)
3574 while (insn->use_empty()) {
3575 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3581 bitcast->eraseFromParent();
3587 llvm::Value *result) {
3589 llvm::BasicBlock *BB = CGF.
Builder.GetInsertBlock();
3592 if (&BB->back() != result)
3595 llvm::Type *resultType = result->getType();
3604 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3610 if (generator->getNextNode() != bitcast)
3613 InstsToKill.push_back(bitcast);
3620 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3624 bool doRetainAutorelease;
3627 doRetainAutorelease =
true;
3628 }
else if (call->getCalledOperand() ==
3630 doRetainAutorelease =
false;
3638 llvm::Instruction *prev = call->getPrevNode();
3641 prev = prev->getPrevNode();
3647 InstsToKill.push_back(prev);
3653 result = call->getArgOperand(0);
3654 InstsToKill.push_back(call);
3658 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3659 if (!bitcast->hasOneUse())
3661 InstsToKill.push_back(bitcast);
3662 result = bitcast->getOperand(0);
3666 for (
auto *I : InstsToKill)
3667 I->eraseFromParent();
3670 if (doRetainAutorelease)
3674 return CGF.
Builder.CreateBitCast(result, resultType);
3679 llvm::Value *result) {
3682 dyn_cast_or_null<ObjCMethodDecl>(CGF.
CurCodeDecl);
3691 llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
3692 if (!retainCall || retainCall->getCalledOperand() !=
3697 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3698 llvm::LoadInst *load =
3699 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3700 if (!load || load->isAtomic() || load->isVolatile() ||
3707 llvm::Type *resultType = result->getType();
3709 assert(retainCall->use_empty());
3710 retainCall->eraseFromParent();
3713 return CGF.
Builder.CreateBitCast(load, resultType);
3720 llvm::Value *result) {
3743 auto GetStoreIfValid = [&CGF,
3744 ReturnValuePtr](llvm::User *
U) -> llvm::StoreInst * {
3745 auto *SI = dyn_cast<llvm::StoreInst>(
U);
3746 if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
3752 assert(!SI->isAtomic() &&
3760 if (!ReturnValuePtr->hasOneUse()) {
3761 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3767 const llvm::Instruction *LoadIntoFakeUse =
nullptr;
3768 for (llvm::Instruction &I : llvm::reverse(*IP)) {
3772 if (LoadIntoFakeUse == &I)
3776 if (
auto *II = dyn_cast<llvm::IntrinsicInst>(&I)) {
3777 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3780 if (II->getIntrinsicID() == llvm::Intrinsic::fake_use) {
3781 LoadIntoFakeUse = dyn_cast<llvm::Instruction>(II->getArgOperand(0));
3785 return GetStoreIfValid(&I);
3790 llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
3796 llvm::BasicBlock *StoreBB = store->getParent();
3797 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3799 while (IP != StoreBB) {
3800 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3816 int BitWidth,
int CharWidth) {
3817 assert(CharWidth <= 64);
3818 assert(
static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3821 if (BitOffset >= CharWidth) {
3822 Pos += BitOffset / CharWidth;
3823 BitOffset = BitOffset % CharWidth;
3826 const uint64_t
Used = (uint64_t(1) << CharWidth) - 1;
3827 if (BitOffset + BitWidth >= CharWidth) {
3828 Bits[Pos++] |= (
Used << BitOffset) &
Used;
3829 BitWidth -= CharWidth - BitOffset;
3833 while (BitWidth >= CharWidth) {
3835 BitWidth -= CharWidth;
3839 Bits[Pos++] |= (
Used >> (CharWidth - BitWidth)) << BitOffset;
3847 int StorageSize,
int BitOffset,
int BitWidth,
3848 int CharWidth,
bool BigEndian) {
3851 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3854 std::reverse(TmpBits.begin(), TmpBits.end());
3856 for (uint64_t
V : TmpBits)
3857 Bits[StorageOffset++] |=
V;
3860static void setUsedBits(CodeGenModule &, QualType,
int,
3861 SmallVectorImpl<uint64_t> &);
3872 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3903 QualType ETy = Context.getBaseElementType(ATy);
3904 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3908 for (
int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3909 auto Src = TmpBits.begin();
3910 auto Dst = Bits.begin() + Offset + I * Size;
3911 for (
int J = 0; J < Size; ++J)
3924 if (
const auto *ATy = Context.getAsConstantArrayType(QTy))
3927 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3931 std::fill_n(Bits.begin() + Offset, Size,
3932 (uint64_t(1) << Context.getCharWidth()) - 1);
3936 int Pos,
int Size,
int CharWidth,
3941 for (
auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3943 Mask = (Mask << CharWidth) | *P;
3945 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3947 Mask = (Mask << CharWidth) | *--P;
3956 llvm::IntegerType *ITy,
3958 assert(Src->getType() == ITy);
3959 assert(ITy->getScalarSizeInBits() <= 64);
3961 const llvm::DataLayout &DataLayout =
CGM.getDataLayout();
3962 int Size = DataLayout.getTypeStoreSize(ITy);
3966 int CharWidth =
CGM.getContext().getCharWidth();
3970 return Builder.CreateAnd(Src, Mask,
"cmse.clear");
3976 llvm::ArrayType *ATy,
3978 const llvm::DataLayout &DataLayout =
CGM.getDataLayout();
3979 int Size = DataLayout.getTypeStoreSize(ATy);
3984 int CharWidth =
CGM.getContext().getCharWidth();
3986 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3988 llvm::Value *R = llvm::PoisonValue::get(ATy);
3989 for (
int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3991 DataLayout.isBigEndian());
3992 MaskIndex += CharsPerElt;
3993 llvm::Value *T0 =
Builder.CreateExtractValue(Src, I);
3994 llvm::Value *T1 =
Builder.CreateAnd(T0, Mask,
"cmse.clear");
3995 R =
Builder.CreateInsertValue(R, T1, I);
4003 uint64_t RetKeyInstructionsSourceAtom) {
4018 auto *I =
Builder.CreateRetVoid();
4019 if (RetKeyInstructionsSourceAtom)
4026 llvm::DebugLoc RetDbgLoc;
4027 llvm::Value *RV =
nullptr;
4037 llvm::Function::arg_iterator EI =
CurFn->arg_end();
4039 llvm::Value *ArgStruct = &*EI;
4040 llvm::Value *SRet =
Builder.CreateStructGEP(
4049 auto AI =
CurFn->arg_begin();
4067 CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
4094 RetDbgLoc = SI->getDebugLoc();
4096 RV = SI->getValueOperand();
4097 SI->eraseFromParent();
4120 if (
auto *FD = dyn_cast<FunctionDecl>(
CurCodeDecl))
4121 RT = FD->getReturnType();
4122 else if (
auto *MD = dyn_cast<ObjCMethodDecl>(
CurCodeDecl))
4123 RT = MD->getReturnType();
4125 RT =
BlockInfo->BlockExpression->getFunctionType()->getReturnType();
4127 llvm_unreachable(
"Unexpected function/method type");
4143 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
4148 unsigned unpaddedIndex = 0;
4149 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4150 auto coercedEltType = coercionType->getElementType(i);
4154 auto eltAddr =
Builder.CreateStructGEP(addr, i);
4157 unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
4158 : unpaddedCoercionType,
4160 results.push_back(elt);
4164 if (results.size() == 1) {
4172 RV = llvm::PoisonValue::get(returnType);
4173 for (
unsigned i = 0, e = results.size(); i != e; ++i) {
4174 RV =
Builder.CreateInsertValue(RV, results[i], i);
4181 RV =
CGM.getABIInfo().createCoercedLoad(
V, RetAI, *
this);
4186 llvm_unreachable(
"Invalid ABI kind for return argument");
4189 llvm::Instruction *Ret;
4195 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
4202 Ret =
Builder.CreateRetVoid();
4206 Ret->setDebugLoc(std::move(RetDbgLoc));
4208 llvm::Value *Backup = RV ? Ret->getOperand(0) :
nullptr;
4209 if (RetKeyInstructionsSourceAtom)
4225 ReturnsNonNullAttr *RetNNAttr =
nullptr;
4226 if (
SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
4227 RetNNAttr =
CurCodeDecl->getAttr<ReturnsNonNullAttr>();
4229 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
4237 assert(!requiresReturnValueNullabilityCheck() &&
4238 "Cannot check nullability and the nonnull attribute");
4239 AttrLoc = RetNNAttr->getLocation();
4240 CheckKind = SanitizerKind::SO_ReturnsNonnullAttribute;
4241 Handler = SanitizerHandler::NonnullReturn;
4243 if (
auto *DD = dyn_cast<DeclaratorDecl>(
CurCodeDecl))
4244 if (
auto *TSI = DD->getTypeSourceInfo())
4246 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
4247 CheckKind = SanitizerKind::SO_NullabilityReturn;
4248 Handler = SanitizerHandler::NullabilityReturn;
4257 llvm::Value *SLocPtr =
Builder.CreateLoad(ReturnLocation,
"return.sloc.load");
4258 llvm::Value *CanNullCheck =
Builder.CreateIsNotNull(SLocPtr);
4259 if (requiresReturnValueNullabilityCheck())
4261 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
4262 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
4268 llvm::Value *DynamicData[] = {SLocPtr};
4269 EmitCheck(std::make_pair(
Cond, CheckKind), Handler, StaticData, DynamicData);
4288 llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.
getLLVMContext());
4289 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
4314 if (
type->isReferenceType()) {
4323 param->
hasAttr<NSConsumedAttr>() &&
type->isObjCRetainableType()) {
4324 llvm::Value *ptr =
Builder.CreateLoad(local);
4327 Builder.CreateStore(null, local);
4338 type->castAsRecordDecl()->isParamDestroyedInCallee() &&
4343 "cleanup for callee-destructed param not recorded");
4345 llvm::Instruction *isActive =
Builder.CreateUnreachable();
4351 return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
4361 const LValue &srcLV = writeback.
Source;
4362 Address srcAddr = srcLV.getAddress();
4364 "shouldn't have writeback for provably null argument");
4372 llvm::BasicBlock *contBB =
nullptr;
4378 if (!provablyNonNull) {
4383 CGF.
Builder.CreateCondBr(isNull, contBB, writebackBB);
4392 "icr.writeback-cast");
4401 if (writeback.
ToUse) {
4426 if (!provablyNonNull)
4435 for (
const auto &I : llvm::reverse(Cleanups)) {
4437 I.IsActiveIP->eraseFromParent();
4443 if (uop->getOpcode() == UO_AddrOf)
4444 return uop->getSubExpr();
4469 Address srcAddr = srcLV.getAddress();
4474 llvm::PointerType *destType =
4476 llvm::Type *destElemType =
4503 llvm::BasicBlock *contBB =
nullptr;
4504 llvm::BasicBlock *originBB =
nullptr;
4507 llvm::Value *finalArgument;
4511 if (provablyNonNull) {
4516 finalArgument = CGF.
Builder.CreateSelect(
4517 isNull, llvm::ConstantPointerNull::get(destType),
4523 originBB = CGF.
Builder.GetInsertBlock();
4526 CGF.
Builder.CreateCondBr(isNull, contBB, copyBB);
4528 condEval.
begin(CGF);
4532 llvm::Value *valueToUse =
nullptr;
4540 src = CGF.
Builder.CreateBitCast(src, destElemType,
"icr.cast");
4557 if (shouldCopy && !provablyNonNull) {
4558 llvm::BasicBlock *copyBB = CGF.
Builder.GetInsertBlock();
4563 llvm::PHINode *phiToUse =
4564 CGF.
Builder.CreatePHI(valueToUse->getType(), 2,
"icr.to-use");
4565 phiToUse->addIncoming(valueToUse, copyBB);
4566 phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),
4568 valueToUse = phiToUse;
4582 StackBase = CGF.
Builder.CreateStackSave(
"inalloca.save");
4588 CGF.
Builder.CreateStackRestore(StackBase);
4595 if (!AC.
getDecl() || !(
SanOpts.has(SanitizerKind::NonnullAttribute) ||
4596 SanOpts.has(SanitizerKind::NullabilityArg)))
4601 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4604 const NonNullAttr *NNAttr =
nullptr;
4605 if (
SanOpts.has(SanitizerKind::NonnullAttribute))
4608 bool CanCheckNullability =
false;
4609 if (
SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
4610 !PVD->getType()->isRecordType()) {
4611 auto Nullability = PVD->getType()->getNullability();
4612 CanCheckNullability = Nullability &&
4614 PVD->getTypeSourceInfo();
4617 if (!NNAttr && !CanCheckNullability)
4624 AttrLoc = NNAttr->getLocation();
4625 CheckKind = SanitizerKind::SO_NonnullAttribute;
4626 Handler = SanitizerHandler::NonnullArg;
4628 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4629 CheckKind = SanitizerKind::SO_NullabilityArg;
4630 Handler = SanitizerHandler::NullabilityArg;
4635 llvm::Constant *StaticData[] = {
4638 llvm::ConstantInt::get(
Int32Ty, ArgNo + 1),
4640 EmitCheck(std::make_pair(
Cond, CheckKind), Handler, StaticData, {});
4646 if (!AC.
getDecl() || !(
SanOpts.has(SanitizerKind::NonnullAttribute) ||
4647 SanOpts.has(SanitizerKind::NullabilityArg)))
4666 return llvm::any_of(ArgTypes, [&](
QualType Ty) {
4677 return classDecl->getTypeParamListAsWritten();
4681 return catDecl->getTypeParamList();
4691 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4695 assert((ParamsToSkip == 0 ||
Prototype.P) &&
4696 "Can't skip parameters if type info is not provided");
4706 bool IsVariadic =
false;
4708 const auto *MD = dyn_cast<const ObjCMethodDecl *>(
Prototype.P);
4710 IsVariadic = MD->isVariadic();
4712 MD,
CGM.getTarget().getTriple().isOSWindows());
4713 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4714 MD->param_type_end());
4717 IsVariadic = FPT->isVariadic();
4718 ExplicitCC = FPT->getExtInfo().getCC();
4719 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4720 FPT->param_type_end());
4728 assert(Arg != ArgRange.end() &&
"Running over edge of argument list!");
4735 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4736 "type mismatch in call argument!");
4742 assert((Arg == ArgRange.end() || IsVariadic) &&
4743 "Extra arguments in non-variadic function!");
4748 for (
auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4749 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4750 assert((
int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4758 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4762 auto MaybeEmitImplicitObjectSize = [&](
unsigned I,
const Expr *Arg,
4771 auto SizeTy = Context.getSizeType();
4773 assert(EmittedArg.getScalarVal() &&
"We emitted nothing for the arg?");
4774 llvm::Value *
V = evaluateOrEmitBuiltinObjectSize(
4775 Arg, PS->getType(),
T, EmittedArg.getScalarVal(), PS->isDynamic());
4780 std::swap(Args.back(), *(&Args.back() - 1));
4785 assert(
getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4786 "inalloca only supported on x86");
4791 size_t CallArgsStart = Args.size();
4792 for (
unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4793 unsigned Idx = LeftToRight ? I : E - I - 1;
4795 unsigned InitialArgSize = Args.size();
4799 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4803 "Argument and parameter types don't match");
4807 assert(InitialArgSize + 1 == Args.size() &&
4808 "The code below depends on only adding one arg per EmitCallArg");
4809 (void)InitialArgSize;
4812 if (!Args.back().hasLValue()) {
4813 RValue RVArg = Args.back().getKnownRValue();
4815 ParamsToSkip + Idx);
4819 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4826 std::reverse(Args.begin() + CallArgsStart, Args.end());
4835struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4868 if (!HasLV &&
RV.isScalar())
4870 else if (!HasLV &&
RV.isComplex())
4873 auto Addr = HasLV ?
LV.getAddress() :
RV.getAggregateAddress();
4877 HasLV ?
LV.isVolatileQualified()
4878 :
RV.isVolatileQualified());
4890 std::optional<DisableDebugLocationUpdates> Dis;
4894 dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4908 "reference binding to unmaterialized r-value!");
4920 if (
type->isRecordType() &&
4921 type->castAsRecordDecl()->isParamDestroyedInCallee()) {
4928 bool DestroyedInCallee =
true, NeedsCleanup =
true;
4929 if (
const auto *RD =
type->getAsCXXRecordDecl())
4930 DestroyedInCallee = RD->hasNonTrivialDestructor();
4932 NeedsCleanup =
type.isDestructedType();
4934 if (DestroyedInCallee)
4941 if (DestroyedInCallee && NeedsCleanup) {
4948 llvm::Instruction *IsActive =
4957 !
type->isArrayParameterType() && !
type.isNonTrivialToPrimitiveCopy()) {
4967QualType CodeGenFunction::getVarArgType(
const Expr *Arg) {
4971 if (!getTarget().getTriple().isOSWindows())
4975 getContext().getTypeSize(Arg->
getType()) <
4979 return getContext().getIntPtrType();
4987void CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4988 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4989 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
4990 Inst->setMetadata(
"clang.arc.no_objc_arc_exceptions",
4991 CGM.getNoObjCARCExceptionsMetadata());
4997 const llvm::Twine &name) {
4998 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value *>(), name);
5004 ArrayRef<Address> args,
5005 const llvm::Twine &name) {
5006 SmallVector<llvm::Value *, 3> values;
5007 for (
auto arg : args)
5008 values.push_back(
arg.emitRawPointer(*
this));
5009 return EmitNounwindRuntimeCall(callee, values, name);
5014 ArrayRef<llvm::Value *> args,
5015 const llvm::Twine &name) {
5016 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
5017 call->setDoesNotThrow();
5024 const llvm::Twine &name) {
5025 return EmitRuntimeCall(callee, {},
name);
5030SmallVector<llvm::OperandBundleDef, 1>
5039 if (
auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
5040 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
5041 auto IID = CalleeFn->getIntrinsicID();
5042 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
5055 const llvm::Twine &name) {
5056 llvm::CallInst *call = Builder.CreateCall(
5057 callee, args, getBundlesForFunclet(callee.getCallee()), name);
5058 call->setCallingConv(getRuntimeCC());
5060 if (CGM.shouldEmitConvergenceTokens() && call->isConvergent())
5072 llvm::InvokeInst *invoke =
Builder.CreateInvoke(
5074 invoke->setDoesNotReturn();
5077 llvm::CallInst *call =
Builder.CreateCall(callee, args, BundleList);
5078 call->setDoesNotReturn();
5087 const Twine &name) {
5095 const Twine &name) {
5105 const Twine &Name) {
5110 llvm::CallBase *Inst;
5112 Inst =
Builder.CreateCall(Callee, Args, BundleList, Name);
5115 Inst =
Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
5122 if (
CGM.getLangOpts().ObjCAutoRefCount)
5123 AddObjCARCExceptionMetadata(Inst);
5128void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
5130 DeferredReplacements.push_back(
5131 std::make_pair(llvm::WeakTrackingVH(Old),
New));
5138[[nodiscard]] llvm::AttributeList
5139maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
5140 const llvm::AttributeList &Attrs,
5141 llvm::Align NewAlign) {
5142 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
5143 if (CurAlign >= NewAlign)
5145 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
5146 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
5147 .addRetAttribute(Ctx, AlignAttr);
5150template <
typename AlignedAttrTy>
class AbstractAssumeAlignedAttrEmitter {
5155 const AlignedAttrTy *AA =
nullptr;
5157 llvm::Value *Alignment =
nullptr;
5158 llvm::ConstantInt *OffsetCI =
nullptr;
5164 AA = FuncDecl->
getAttr<AlignedAttrTy>();
5169 [[nodiscard]] llvm::AttributeList
5170 TryEmitAsCallSiteAttribute(
const llvm::AttributeList &Attrs) {
5171 if (!AA || OffsetCI || CGF.
SanOpts.
has(SanitizerKind::Alignment))
5173 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
5178 if (!AlignmentCI->getValue().isPowerOf2())
5180 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
5183 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
5191 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
5195 AA->getLocation(), Alignment, OffsetCI);
5201class AssumeAlignedAttrEmitter final
5202 :
public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
5204 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_,
const Decl *FuncDecl)
5205 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5210 if (Expr *Offset = AA->getOffset()) {
5212 if (OffsetCI->isNullValue())
5219class AllocAlignAttrEmitter final
5220 :
public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
5222 AllocAlignAttrEmitter(CodeGenFunction &CGF_,
const Decl *FuncDecl,
5223 const CallArgList &CallArgs)
5224 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5228 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
5237 if (
auto *VT = dyn_cast<llvm::VectorType>(Ty))
5238 return VT->getPrimitiveSizeInBits().getKnownMinValue();
5239 if (
auto *AT = dyn_cast<llvm::ArrayType>(Ty))
5242 unsigned MaxVectorWidth = 0;
5243 if (
auto *ST = dyn_cast<llvm::StructType>(Ty))
5244 for (
auto *I : ST->elements())
5246 return MaxVectorWidth;
5253 llvm::CallBase **callOrInvoke,
bool IsMustTail,
5255 bool IsVirtualFunctionPointerThunk) {
5258 assert(Callee.isOrdinary() || Callee.isVirtual());
5265 llvm::FunctionType *IRFuncTy =
getTypes().GetFunctionType(CallInfo);
5267 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
5268 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5275 if ((TargetDecl->
hasAttr<AlwaysInlineAttr>() &&
5276 (TargetDecl->
hasAttr<TargetAttr>() ||
5280 TargetDecl->
hasAttr<TargetAttr>())))
5287 const FunctionDecl *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl);
5288 CGM.getTargetCodeGenInfo().checkFunctionCallABI(
CGM, Loc, CallerDecl,
5289 CalleeDecl, CallArgs, RetTy);
5296 if (llvm::StructType *ArgStruct = CallInfo.
getArgStruct()) {
5297 const llvm::DataLayout &DL =
CGM.getDataLayout();
5299 llvm::AllocaInst *AI;
5301 IP = IP->getNextNode();
5302 AI =
new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
"argmem",
5308 AI->setAlignment(Align.getAsAlign());
5309 AI->setUsedWithInAlloca(
true);
5310 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5311 ArgMemory =
RawAddress(AI, ArgStruct, Align);
5314 ClangToLLVMArgMapping IRFunctionArgs(
CGM.getContext(), CallInfo);
5320 bool NeedSRetLifetimeEnd =
false;
5326 if ((IsVirtualFunctionPointerThunk || IsMustTail) && RetAI.
isIndirect()) {
5328 IRFunctionArgs.getSRetArgNo(),
5337 if (IRFunctionArgs.hasSRetArg()) {
5352 IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
5370 assert(CallInfo.
arg_size() == CallArgs.size() &&
5371 "Mismatch between function signature & arguments.");
5374 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5375 I != E; ++I, ++info_it, ++ArgNo) {
5379 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5380 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5383 unsigned FirstIRArg, NumIRArgs;
5384 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5386 bool ArgHasMaybeUndefAttr =
5391 assert(NumIRArgs == 0);
5392 assert(
getTarget().getTriple().getArch() == llvm::Triple::x86);
5393 if (I->isAggregate()) {
5395 ? I->getKnownLValue().getAddress()
5396 : I->getKnownRValue().getAggregateAddress();
5397 llvm::Instruction *Placeholder =
5402 CGBuilderTy::InsertPoint IP =
Builder.saveIP();
5403 Builder.SetInsertPoint(Placeholder);
5416 deferPlaceholderReplacement(Placeholder,
Addr.getPointer());
5421 I->Ty,
getContext().getTypeAlignInChars(I->Ty),
5422 "indirect-arg-temp");
5423 I->copyInto(*
this,
Addr);
5432 I->copyInto(*
this,
Addr);
5439 assert(NumIRArgs == 1);
5440 if (I->isAggregate()) {
5450 ? I->getKnownLValue().getAddress()
5451 : I->getKnownRValue().getAggregateAddress();
5453 const llvm::DataLayout *TD = &
CGM.getDataLayout();
5455 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5456 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5457 TD->getAllocaAddrSpace()) &&
5458 "indirect argument must be in alloca address space");
5460 bool NeedCopy =
false;
5461 if (
Addr.getAlignment() < Align &&
5462 llvm::getOrEnforceKnownAlignment(
Addr.emitRawPointer(*
this),
5466 }
else if (I->hasLValue()) {
5467 auto LV = I->getKnownLValue();
5472 if (!isByValOrRef ||
5473 (LV.getAlignment() <
getContext().getTypeAlignInChars(I->Ty))) {
5477 if (isByValOrRef &&
Addr.getType()->getAddressSpace() !=
5486 auto *
T = llvm::PointerType::get(
CGM.getLLVMContext(),
5494 *
this,
V, I->Ty.getAddressSpace(),
T,
true);
5495 if (ArgHasMaybeUndefAttr)
5496 Val =
Builder.CreateFreeze(Val);
5497 IRCallArgs[FirstIRArg] = Val;
5500 }
else if (I->getType()->isArrayParameterType()) {
5506 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5515 if (ArgHasMaybeUndefAttr)
5516 Val =
Builder.CreateFreeze(Val);
5517 IRCallArgs[FirstIRArg] = Val;
5522 CallLifetimeEndAfterCall.emplace_back(AI);
5525 I->copyInto(*
this, AI);
5530 assert(NumIRArgs == 0);
5538 assert(NumIRArgs == 1);
5540 if (!I->isAggregate())
5541 V = I->getKnownRValue().getScalarVal();
5544 I->hasLValue() ? I->getKnownLValue().getAddress()
5545 : I->getKnownRValue().getAggregateAddress());
5551 assert(!swiftErrorTemp.
isValid() &&
"multiple swifterror args");
5555 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
5562 llvm::Value *errorValue =
Builder.CreateLoad(swiftErrorArg);
5563 Builder.CreateStore(errorValue, swiftErrorTemp);
5568 V->getType()->isIntegerTy())
5575 if (FirstIRArg < IRFuncTy->getNumParams() &&
5576 V->getType() != IRFuncTy->getParamType(FirstIRArg)) {
5577 assert(
V->getType()->isPointerTy() &&
"Only pointers can mismatch!");
5578 auto ActualAS = I->Ty.getAddressSpace();
5580 *
this,
V, ActualAS, IRFuncTy->getParamType(FirstIRArg));
5583 if (ArgHasMaybeUndefAttr)
5585 IRCallArgs[FirstIRArg] =
V;
5589 llvm::StructType *STy =
5594 if (!I->isAggregate()) {
5596 I->copyInto(*
this, Src);
5598 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5599 : I->getKnownRValue().getAggregateAddress();
5609 llvm::TypeSize SrcTypeSize =
5610 CGM.getDataLayout().getTypeAllocSize(SrcTy);
5611 llvm::TypeSize DstTypeSize =
CGM.getDataLayout().getTypeAllocSize(STy);
5612 if (SrcTypeSize.isScalable()) {
5613 assert(STy->containsHomogeneousScalableVectorTypes() &&
5614 "ABI only supports structure with homogeneous scalable vector "
5616 assert(SrcTypeSize == DstTypeSize &&
5617 "Only allow non-fractional movement of structure with "
5618 "homogeneous scalable vector type");
5619 assert(NumIRArgs == STy->getNumElements());
5621 llvm::Value *StoredStructValue =
5623 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5624 llvm::Value *Extract =
Builder.CreateExtractValue(
5625 StoredStructValue, i, Src.
getName() +
".extract" + Twine(i));
5626 IRCallArgs[FirstIRArg + i] = Extract;
5629 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5630 uint64_t DstSize = DstTypeSize.getFixedValue();
5636 if (SrcSize < DstSize) {
5639 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
5645 assert(NumIRArgs == STy->getNumElements());
5646 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5648 llvm::Value *LI =
Builder.CreateLoad(EltPtr);
5649 if (ArgHasMaybeUndefAttr)
5650 LI =
Builder.CreateFreeze(LI);
5651 IRCallArgs[FirstIRArg + i] = LI;
5656 assert(NumIRArgs == 1);
5664 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5669 if (ArgHasMaybeUndefAttr)
5670 Load =
Builder.CreateFreeze(Load);
5671 IRCallArgs[FirstIRArg] = Load;
5679 auto layout =
CGM.getDataLayout().getStructLayout(coercionType);
5681 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
5685 bool NeedLifetimeEnd =
false;
5686 if (I->isAggregate()) {
5687 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5688 : I->getKnownRValue().getAggregateAddress();
5691 RValue RV = I->getKnownRValue();
5695 auto scalarAlign =
CGM.getDataLayout().getPrefTypeAlign(scalarType);
5700 layout->getAlignment(), scalarAlign)),
5702 nullptr, &AllocaAddr);
5710 unsigned IRArgPos = FirstIRArg;
5711 unsigned unpaddedIndex = 0;
5712 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5713 llvm::Type *eltType = coercionType->getElementType(i);
5720 : unpaddedCoercionType,
5722 if (ArgHasMaybeUndefAttr)
5723 elt =
Builder.CreateFreeze(elt);
5724 IRCallArgs[IRArgPos++] = elt;
5726 assert(IRArgPos == FirstIRArg + NumIRArgs);
5728 if (NeedLifetimeEnd)
5734 unsigned IRArgPos = FirstIRArg;
5735 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5736 assert(IRArgPos == FirstIRArg + NumIRArgs);
5742 if (!I->isAggregate()) {
5744 I->copyInto(*
this, Src);
5746 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5747 : I->getKnownRValue().getAggregateAddress();
5753 CGM.getABIInfo().createCoercedLoad(Src, ArgInfo, *
this);
5754 IRCallArgs[FirstIRArg] = Load;
5760 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*
this);
5766 assert(IRFunctionArgs.hasInallocaArg());
5767 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5778 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5779 llvm::Value *Ptr) -> llvm::Function * {
5780 if (!CalleeFT->isVarArg())
5784 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5785 if (CE->getOpcode() == llvm::Instruction::BitCast)
5786 Ptr = CE->getOperand(0);
5789 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5793 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5797 if (OrigFT->isVarArg() ||
5798 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5799 OrigFT->getReturnType() != CalleeFT->getReturnType())
5802 for (
unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5803 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5809 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5811 IRFuncTy = OrigFn->getFunctionType();
5822 for (
unsigned i = 0; i < IRCallArgs.size(); ++i)
5823 LargestVectorWidth = std::max(LargestVectorWidth,
5828 llvm::AttributeList Attrs;
5829 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5834 if (
CallingConv == llvm::CallingConv::X86_VectorCall &&
5835 getTarget().getTriple().isWindowsArm64EC()) {
5836 CGM.Error(Loc,
"__vectorcall calling convention is not currently "
5841 if (FD->hasAttr<StrictFPAttr>())
5843 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5848 if (FD->hasAttr<OptimizeNoneAttr>() &&
getLangOpts().FastMath)
5849 CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(),
5854 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoMerge);
5858 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5863 !
CGM.getTargetCodeGenInfo().wouldInliningViolateFunctionCallABI(
5864 CallerDecl, CalleeDecl))
5866 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5871 Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Convergent);
5880 !(TargetDecl && TargetDecl->
hasAttr<NoInlineAttr>()) &&
5881 !
CGM.getTargetCodeGenInfo().wouldInliningViolateFunctionCallABI(
5882 CallerDecl, CalleeDecl)) {
5884 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5889 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5896 CannotThrow =
false;
5905 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5907 if (
auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5908 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5916 if (NeedSRetLifetimeEnd)
5924 if (
SanOpts.has(SanitizerKind::KCFI) &&
5925 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5932 if (FD->hasAttr<StrictFPAttr>())
5934 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5936 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*
this, TargetDecl);
5937 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5939 AllocAlignAttrEmitter AllocAlignAttrEmitter(*
this, TargetDecl, CallArgs);
5940 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5945 CI =
Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5948 CI =
Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5952 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
5953 CI->getCalledFunction()->getName().starts_with(
"_Z4sqrt")) {
5958 if (
CGM.getCodeGenOpts().CallGraphSection) {
5962 else if (
const auto *FPT =
5963 Callee.getAbstractInfo().getCalleeFunctionProtoType())
5967 "Cannot find the callee type to generate callee_type metadata.");
5971 CGM.createCalleeTypeMetadataForIcall(CST, *callOrInvoke);
5978 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(
CurFuncDecl)) {
5979 if (
const auto *A = FD->getAttr<CFGuardAttr>()) {
5980 if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
5981 !CI->getCalledFunction())
5987 CI->setAttributes(Attrs);
5988 CI->setCallingConv(
static_cast<llvm::CallingConv::ID
>(
CallingConv));
5992 if (!CI->getType()->isVoidTy())
5993 CI->setName(
"call");
5995 if (
CGM.shouldEmitConvergenceTokens() && CI->isConvergent())
5996 CI = addConvergenceControlToken(CI);
5999 LargestVectorWidth =
6005 if (!CI->getCalledFunction())
6006 PGO->valueProfile(
Builder, llvm::IPVK_IndirectCallTarget, CI, CalleePtr);
6010 if (
CGM.getLangOpts().ObjCAutoRefCount)
6011 AddObjCARCExceptionMetadata(CI);
6014 bool IsPPC =
getTarget().getTriple().isPPC();
6015 bool IsMIPS =
getTarget().getTriple().isMIPS();
6016 bool HasMips16 =
false;
6019 HasMips16 = TargetOpts.
FeatureMap.lookup(
"mips16");
6021 HasMips16 = llvm::is_contained(TargetOpts.
Features,
"+mips16");
6023 if (llvm::CallInst *
Call = dyn_cast<llvm::CallInst>(CI)) {
6024 if (TargetDecl && TargetDecl->
hasAttr<NotTailCalledAttr>())
6025 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
6026 else if (IsMustTail) {
6029 CGM.getDiags().Report(Loc, diag::err_aix_musttail_unsupported);
6032 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 0;
6033 else if (
Call->isIndirectCall())
6034 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 1;
6035 else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
6040 CGM.addUndefinedGlobalForTailCall(
6043 llvm::GlobalValue::LinkageTypes
Linkage =
CGM.getFunctionLinkage(
6045 if (llvm::GlobalValue::isWeakForLinker(
Linkage) ||
6046 llvm::GlobalValue::isDiscardableIfUnused(
Linkage))
6047 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail)
6055 CGM.getDiags().Report(Loc, diag::err_mips_impossible_musttail) << 0;
6056 else if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
6057 CGM.addUndefinedGlobalForTailCall({FD, Loc});
6059 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
6068 if (TargetDecl && TargetDecl->
hasAttr<ErrorAttr>()) {
6069 llvm::ConstantInt *
Line =
6071 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(
Line);
6073 CI->setMetadata(
"srcloc", MDT);
6081 if (CI->doesNotReturn()) {
6082 if (NeedSRetLifetimeEnd)
6086 if (
SanOpts.has(SanitizerKind::Unreachable)) {
6089 if (
auto *F = CI->getCalledFunction())
6090 F->removeFnAttr(llvm::Attribute::NoReturn);
6091 CI->removeFnAttr(llvm::Attribute::NoReturn);
6095 if (
SanOpts.hasOneOf(SanitizerKind::Address |
6096 SanitizerKind::KernelAddress)) {
6098 llvm::IRBuilder<>::InsertPointGuard IPGuard(
Builder);
6100 auto *FnType = llvm::FunctionType::get(
CGM.VoidTy,
false);
6101 llvm::FunctionCallee Fn =
6102 CGM.CreateRuntimeFunction(FnType,
"__asan_handle_no_return");
6108 Builder.ClearInsertionPoint();
6127 if (Cleanup && Cleanup->isFakeUse()) {
6128 CGBuilderTy::InsertPointGuard IPG(
Builder);
6130 Cleanup->getCleanup()->Emit(*
this, EHScopeStack::Cleanup::Flags());
6131 }
else if (!(Cleanup &&
6132 Cleanup->getCleanup()->isRedundantBeforeReturn())) {
6133 CGM.ErrorUnsupported(
MustTailCall,
"tail call skipping over cleanups");
6136 if (CI->getType()->isVoidTy())
6140 Builder.ClearInsertionPoint();
6146 if (swiftErrorTemp.
isValid()) {
6147 llvm::Value *errorResult =
Builder.CreateLoad(swiftErrorTemp);
6148 Builder.CreateStore(errorResult, swiftErrorArg);
6165 if (IsVirtualFunctionPointerThunk) {
6178 unsigned unpaddedIndex = 0;
6179 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
6180 llvm::Type *eltType = coercionType->getElementType(i);
6184 llvm::Value *elt = CI;
6185 if (requiresExtract)
6186 elt =
Builder.CreateExtractValue(elt, unpaddedIndex++);
6188 assert(unpaddedIndex == 0);
6189 Builder.CreateStore(elt, eltAddr);
6197 if (NeedSRetLifetimeEnd)
6214 llvm::Value *Real =
Builder.CreateExtractValue(CI, 0);
6215 llvm::Value *Imag =
Builder.CreateExtractValue(CI, 1);
6223 llvm::Value *
V = CI;
6224 if (
V->getType() != RetIRTy)
6234 if (
auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
6235 llvm::Value *
V = CI;
6236 if (
auto *ScalableSrcTy =
6237 dyn_cast<llvm::ScalableVectorType>(
V->getType())) {
6238 if (FixedDstTy->getElementType() ==
6239 ScalableSrcTy->getElementType()) {
6240 V =
Builder.CreateExtractVector(FixedDstTy,
V, uint64_t(0),
6250 getContext().getTypeInfoDataSizeInChars(RetTy).Width.getQuantity();
6254 DestIsVolatile =
false;
6255 DestSize =
getContext().getTypeSizeInChars(RetTy).getQuantity();
6279 DestIsVolatile =
false;
6281 CGM.getABIInfo().createCoercedStore(CI, StorePtr, RetAI, DestIsVolatile,
6288 llvm_unreachable(
"Invalid ABI kind for return argument");
6291 llvm_unreachable(
"Unhandled ABIArgInfo::Kind");
6296 if (Ret.isScalar() && TargetDecl) {
6297 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6298 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6304 LifetimeEnd.Emit(*
this, {});
6316 if (CalleeDecl && !CalleeDecl->
hasAttr<NoDebugAttr>() &&
6317 DI->getCallSiteRelatedAttrs() != llvm::DINode::FlagZero) {
6318 CodeGenFunction CalleeCGF(
CGM);
6320 Callee.getAbstractInfo().getCalleeDecl();
6321 CalleeCGF.
CurGD = CalleeGlobalDecl;
6324 DI->EmitFuncDeclForCallSite(
6325 CI, DI->getFunctionType(CalleeDecl, ResTy, Args), CalleeGlobalDecl);
6352 if (
VE->isMicrosoftABI())
6353 return CGM.getABIInfo().EmitMSVAArg(*
this, VAListAddr, Ty, Slot);
6354 return CGM.getABIInfo().EmitVAArg(*
this, VAListAddr, Ty, Slot);
6359 CGF.disableDebugInfo();
6363 CGF.enableDebugInfo();
static ExtParameterInfoList getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument ArgNo to be non-null.
static CanQualTypeList getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsTargetDefaultMSABI)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA=nullptr)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from \p TargetOpts and \p F, and sets the result in \p FuncAttr.
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, llvm::DenormalMode FP32DenormalMode, llvm::AttrBuilder &FuncAttrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
SmallVector< CanQualType, 16 > CanQualTypeList
static std::pair< llvm::Value *, bool > CoerceScalableToFixed(CodeGenFunction &CGF, llvm::FixedVectorType *ToTy, llvm::ScalableVectorType *FromTy, llvm::Value *V, StringRef Name="")
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit parameters already stored.
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
SmallVector< FunctionProtoType::ExtParameterInfo, 16 > ExtParameterInfoList
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of bytes out of it, try to gep into the struct to get at its inner goodness.
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static CanQualTypeList getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and should not simply overwrite any existing attributes.
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
static void appendParameterTypes(const CIRGenTypes &cgt, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > fpt)
Adds the formal parameters in FPT to the given prefix.
static const CIRGenFunctionInfo & arrangeFreeFunctionLikeCall(CIRGenTypes &cgt, CIRGenModule &cgm, const CallArgList &args, const FunctionType *fnType)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
#define CC_VLS_CASE(ABI_VLEN)
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic analysis of a program.
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
CanQualType getCanonicalSizeType() const
const TargetInfo & getTargetInfo() const
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
This class is used for builtin types like 'int'.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const
ConstExprIterator const_arg_iterator
Represents a canonical, potentially-qualified type.
static CanQual< Type > CreateUnsafe(QualType Other)
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ TargetSpecific
TargetSpecific - Some argument types are passed as target specific types such as RISC-V's tuple type,...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
unsigned getAddressSpace() const
Return the address space that this address resides in.
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
llvm::StringRef getName() const
Return the IR name of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
const ArgInfo * const_arg_iterator
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr)
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
An abstract representation of regular/ObjC call/message targets.
const ParmVarDecl * getParamDecl(unsigned I) const
const Decl * getDecl() const
unsigned getNumParams() const
bool hasFunctionDecl() const
An object to manage conditionally-evaluated expressions.
void begin(CodeGenFunction &CGF)
void end(CodeGenFunction &CGF)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
Emits a call or invoke to the given noreturn runtime function.
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
void callCStructDestructor(LValue Dst)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom)
See CGDebugInfo::addInstToSpecificSourceAtom.
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
const CodeGen::CGBlockInfo * BlockInfo
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetInfo & getTarget() const
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
void EmitWritebacks(const CallArgList &Args)
EmitWritebacks - Emit callbacks for function.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
llvm::BasicBlock * getInvokeDest()
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
CGDebugInfo * getDebugInfo()
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
const TargetCodeGenInfo & getTargetHooks() const
void EmitLifetimeEnd(llvm::Value *Addr)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
llvm::Type * ConvertTypeForMem(QualType T)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc, uint64_t RetKeyInstructionsSourceAtom)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CallExpr * MustTailCall
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
This class organizes the cross-function state that is used while generating LLVM code.
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
ObjCEntrypoints & getObjCEntrypoints() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeDeviceKernelCallerDeclaration(QualType resultType, const FunctionArgList &args)
A device kernel caller function is an offload device entry point function with a target device depend...
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeFunctionDeclaration(const GlobalDecl GD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
A cleanup scope which generates the cleanup blocks lazily.
A saved depth on the scope stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddress() - Return the Value* of the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
static void initPointerAuthFnAttributes(const PointerAuthOptions &Opts, llvm::AttrBuilder &FuncAttrs)
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
const FunctionType * getFunctionType(bool BlocksToo=true) const
Looks through the Decl's underlying type to extract a FunctionType when possible.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
bool isZeroLengthBitField() const
Is this a zero-length bit-field?
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
@ SME_AgnosticZAStateMask
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
KernelReferenceKind getKernelReferenceKind() const
const Decl * getDecl() const
This class represents temporary values used to represent inout and out arguments in HLSL.
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
bool isNull() const
Return true if this QualType doesn't point to a type yet.
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
field_iterator field_end() const
bool isParamDestroyedInCallee() const
field_iterator field_begin() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
llvm::StringMap< bool > FeatureMap
The map of which features have been enabled or disabled based on the command line.
bool isIncompleteArrayType() const
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
RecordDecl * castAsRecordDecl() const
bool isMemberPointerType() const
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
const T * getAs() const
Member-template getAs<specific type>'.
bool isNullPtrType() const
bool isRecordType() const
bool isObjCRetainableType() const
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool This(InterpState &S, CodePtr OpPC)
bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isa(CodeGen::Address addr)
static bool classof(const OMPClause *T)
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
nullptr
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
const FunctionProtoType * T
@ Dtor_Complete
Complete object dtor.
LangAS
Defines the address space values used by the address space qualifier of QualType.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
U cast(CodeGen::Address addr)
LangAS getLangASFromTargetAS(unsigned TargetAS)
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
const Expr * WritebackExpr
An Expression (optional) that performs the writeback with any required casting.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
~DisableDebugLocationUpdates()
DisableDebugLocationUpdates(CodeGenFunction &CGF)
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.