35#include "llvm/ADT/STLExtras.h"
36#include "llvm/ADT/StringExtras.h"
37#include "llvm/Analysis/ValueTracking.h"
38#include "llvm/IR/Assumptions.h"
39#include "llvm/IR/AttributeMask.h"
40#include "llvm/IR/Attributes.h"
41#include "llvm/IR/CallingConv.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DebugInfoMetadata.h"
44#include "llvm/IR/InlineAsm.h"
45#include "llvm/IR/IntrinsicInst.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/Type.h"
48#include "llvm/Transforms/Utils/Local.h"
58 return llvm::CallingConv::C;
60 return llvm::CallingConv::X86_StdCall;
62 return llvm::CallingConv::X86_FastCall;
64 return llvm::CallingConv::X86_RegCall;
66 return llvm::CallingConv::X86_ThisCall;
68 return llvm::CallingConv::Win64;
70 return llvm::CallingConv::X86_64_SysV;
72 return llvm::CallingConv::ARM_AAPCS;
74 return llvm::CallingConv::ARM_AAPCS_VFP;
76 return llvm::CallingConv::Intel_OCL_BI;
79 return llvm::CallingConv::C;
82 return llvm::CallingConv::X86_VectorCall;
84 return llvm::CallingConv::AArch64_VectorCall;
86 return llvm::CallingConv::AArch64_SVE_VectorCall;
88 return llvm::CallingConv::SPIR_FUNC;
90 return CGM.getTargetCodeGenInfo().getDeviceKernelCallingConv();
92 return llvm::CallingConv::PreserveMost;
94 return llvm::CallingConv::PreserveAll;
96 return llvm::CallingConv::Swift;
98 return llvm::CallingConv::SwiftTail;
100 return llvm::CallingConv::M68k_RTD;
102 return llvm::CallingConv::PreserveNone;
106#define CC_VLS_CASE(ABI_VLEN) \
107 case CC_RISCVVLSCall_##ABI_VLEN: \
108 return llvm::CallingConv::RISCV_VLSCall_##ABI_VLEN;
133 RecTy = Context.getCanonicalTagType(RD);
135 RecTy = Context.VoidTy;
140 return Context.getPointerType(RecTy);
173 assert(paramInfos.size() <= prefixArgs);
174 assert(proto->
getNumParams() + prefixArgs <= totalArgs);
176 paramInfos.reserve(totalArgs);
179 paramInfos.resize(prefixArgs);
183 paramInfos.push_back(ParamInfo);
185 if (ParamInfo.hasPassObjectSize())
186 paramInfos.emplace_back();
189 assert(paramInfos.size() <= totalArgs &&
190 "Did we forget to insert pass_object_size args?");
192 paramInfos.resize(totalArgs);
202 if (!FPT->hasExtParameterInfos()) {
203 assert(paramInfos.empty() &&
204 "We have paramInfos, but the prototype doesn't?");
205 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
209 unsigned PrefixSize = prefix.size();
213 prefix.reserve(prefix.size() + FPT->getNumParams());
215 auto ExtInfos = FPT->getExtParameterInfos();
216 assert(ExtInfos.size() == FPT->getNumParams());
217 for (
unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
218 prefix.push_back(FPT->getParamType(I));
219 if (ExtInfos[I].hasPassObjectSize())
244 FTP->getExtInfo(), paramInfos,
Required);
254 return ::arrangeLLVMFunctionInfo(*
this,
false, argTypes,
259 bool IsTargetDefaultMSABI) {
264 if (D->
hasAttr<FastCallAttr>())
270 if (D->
hasAttr<ThisCallAttr>())
273 if (D->
hasAttr<VectorCallAttr>())
279 if (PcsAttr *PCS = D->
getAttr<PcsAttr>())
282 if (D->
hasAttr<AArch64VectorPcsAttr>())
285 if (D->
hasAttr<AArch64SVEPcsAttr>())
288 if (D->
hasAttr<DeviceKernelAttr>())
291 if (D->
hasAttr<IntelOclBiccAttr>())
300 if (D->
hasAttr<PreserveMostAttr>())
303 if (D->
hasAttr<PreserveAllAttr>())
309 if (D->
hasAttr<PreserveNoneAttr>())
312 if (D->
hasAttr<RISCVVectorCCAttr>())
315 if (RISCVVLSCCAttr *PCS = D->
getAttr<RISCVVLSCCAttr>()) {
316 switch (PCS->getVectorWidth()) {
318 llvm_unreachable(
"Invalid RISC-V VLS ABI VLEN");
319#define CC_VLS_CASE(ABI_VLEN) \
321 return CC_RISCVVLSCall_##ABI_VLEN;
356 return ::arrangeLLVMFunctionInfo(
357 *
this,
true, argTypes,
364 if (FD->
hasAttr<CUDAGlobalAttr>()) {
400 !Target.getCXXABI().hasConstructorVariants();
413 bool PassParams =
true;
415 if (
auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
418 if (
auto Inherited = CD->getInheritedConstructor())
430 if (!paramInfos.empty()) {
433 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.
Prefix,
436 paramInfos.append(AddedArgs.
Suffix,
441 (PassParams && MD->isVariadic() ?
RequiredArgs(argTypes.size())
447 ? CGM.getContext().VoidPtrTy
450 argTypes, extInfo, paramInfos, required);
456 for (
auto &arg : args)
464 for (
auto &arg : args)
471 unsigned totalArgs) {
489 unsigned ExtraPrefixArgs,
unsigned ExtraSuffixArgs,
bool PassProtoArgs) {
491 for (
const auto &Arg : args)
492 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
495 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
500 FPT, TotalPrefixArgs + ExtraSuffixArgs)
506 ? CGM.getContext().VoidPtrTy
513 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
520 ArgTypes, Info, ParamInfos,
Required);
529 if (MD->isImplicitObjectMemberFunction())
537 if (DeviceKernelAttr::isOpenCLSpelling(FD->
getAttr<DeviceKernelAttr>()) &&
540 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
548 {}, noProto->getExtInfo(), {},
575 argTys.push_back(Context.getCanonicalParamType(receiverType));
577 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
579 argTys.push_back(Context.getCanonicalParamType(I->getType()));
581 I->hasAttr<NoEscapeAttr>());
582 extParamInfos.push_back(extParamInfo);
586 bool IsTargetDefaultMSABI =
592 if (
getContext().getLangOpts().ObjCAutoRefCount &&
593 MD->
hasAttr<NSReturnsRetainedAttr>())
630 assert(MD->
isVirtual() &&
"only methods have thunks");
647 ArgTys.push_back(*FTP->param_type_begin());
649 ArgTys.push_back(Context.IntTy);
650 CallingConv CC = Context.getDefaultCallingConvention(
662 unsigned numExtraRequiredArgs,
bool chainCall) {
663 assert(args.size() >= numExtraRequiredArgs);
673 if (proto->isVariadic())
676 if (proto->hasExtParameterInfos())
690 for (
const auto &arg : args)
695 paramInfos, required);
705 chainCall ? 1 : 0, chainCall);
734 for (
const auto &Arg : args)
735 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
775 assert(numPrefixArgs + 1 <= args.size() &&
776 "Emitting a call with less args than the required prefix?");
787 paramInfos, required);
798 assert(signature.
arg_size() <= args.size());
799 if (signature.
arg_size() == args.size())
804 if (!sigParamInfos.empty()) {
805 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
806 paramInfos.resize(args.size());
838 assert(llvm::all_of(argTypes,
839 [](
CanQualType T) {
return T.isCanonicalAsParam(); }));
842 llvm::FoldingSetNodeID ID;
847 bool isDelegateCall =
850 info, paramInfos, required, resultType, argTypes);
852 void *insertPos =
nullptr;
853 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
861 info, paramInfos, resultType, argTypes, required);
862 FunctionInfos.InsertNode(FI, insertPos);
864 bool inserted = FunctionsBeingProcessed.insert(FI).second;
866 assert(inserted &&
"Recursively being processed?");
869 if (CC == llvm::CallingConv::SPIR_KERNEL) {
876 CGM.getABIInfo().computeInfo(*FI);
887 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() ==
nullptr)
890 bool erased = FunctionsBeingProcessed.erase(FI);
892 assert(erased &&
"Not in set?");
898 bool chainCall,
bool delegateCall,
904 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
908 void *buffer =
operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
909 argTypes.size() + 1, paramInfos.size()));
911 CGFunctionInfo *FI =
new (buffer) CGFunctionInfo();
912 FI->CallingConvention = llvmCC;
913 FI->EffectiveCallingConvention = llvmCC;
914 FI->ASTCallingConvention = info.
getCC();
915 FI->InstanceMethod = instanceMethod;
916 FI->ChainCall = chainCall;
917 FI->DelegateCall = delegateCall;
923 FI->Required = required;
926 FI->ArgStruct =
nullptr;
927 FI->ArgStructAlign = 0;
928 FI->NumArgs = argTypes.size();
929 FI->HasExtParameterInfos = !paramInfos.empty();
930 FI->getArgsBuffer()[0].
type = resultType;
931 FI->MaxVectorWidth = 0;
932 for (
unsigned i = 0, e = argTypes.size(); i != e; ++i)
933 FI->getArgsBuffer()[i + 1].
type = argTypes[i];
934 for (
unsigned i = 0, e = paramInfos.size(); i != e; ++i)
935 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
945struct TypeExpansion {
946 enum TypeExpansionKind {
958 const TypeExpansionKind Kind;
960 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
961 virtual ~TypeExpansion() {}
964struct ConstantArrayExpansion : TypeExpansion {
968 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
969 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
970 static bool classof(
const TypeExpansion *TE) {
971 return TE->Kind == TEK_ConstantArray;
975struct RecordExpansion : TypeExpansion {
976 SmallVector<const CXXBaseSpecifier *, 1> Bases;
978 SmallVector<const FieldDecl *, 1> Fields;
980 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
981 SmallVector<const FieldDecl *, 1> &&Fields)
982 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
983 Fields(std::move(Fields)) {}
984 static bool classof(
const TypeExpansion *TE) {
985 return TE->Kind == TEK_Record;
989struct ComplexExpansion : TypeExpansion {
992 ComplexExpansion(QualType EltTy) : TypeExpansion(
TEK_Complex), EltTy(EltTy) {}
993 static bool classof(
const TypeExpansion *TE) {
998struct NoExpansion : TypeExpansion {
999 NoExpansion() : TypeExpansion(TEK_None) {}
1000 static bool classof(
const TypeExpansion *TE) {
return TE->Kind == TEK_None; }
1004static std::unique_ptr<TypeExpansion>
1007 return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
1013 assert(!RD->hasFlexibleArrayMember() &&
1014 "Cannot expand structure with flexible array.");
1015 if (RD->isUnion()) {
1021 for (
const auto *FD : RD->fields()) {
1022 if (FD->isZeroLengthBitField())
1024 assert(!FD->isBitField() &&
1025 "Cannot expand structure with bit-field members.");
1026 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
1027 if (UnionSize < FieldSize) {
1028 UnionSize = FieldSize;
1033 Fields.push_back(LargestFD);
1035 if (
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1036 assert(!CXXRD->isDynamicClass() &&
1037 "cannot expand vtable pointers in dynamic classes");
1038 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
1041 for (
const auto *FD : RD->fields()) {
1042 if (FD->isZeroLengthBitField())
1044 assert(!FD->isBitField() &&
1045 "Cannot expand structure with bit-field members.");
1046 Fields.push_back(FD);
1049 return std::make_unique<RecordExpansion>(std::move(Bases),
1053 return std::make_unique<ComplexExpansion>(CT->getElementType());
1055 return std::make_unique<NoExpansion>();
1060 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1063 if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1065 for (
auto BS : RExp->Bases)
1067 for (
auto FD : RExp->Fields)
1080 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1081 for (
int i = 0, n = CAExp->NumElts; i < n; i++) {
1084 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1085 for (
auto BS : RExp->Bases)
1087 for (
auto FD : RExp->Fields)
1089 }
else if (
auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1100 ConstantArrayExpansion *CAE,
1102 llvm::function_ref<
void(
Address)> Fn) {
1103 for (
int i = 0, n = CAE->NumElts; i < n; i++) {
1109void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1110 llvm::Function::arg_iterator &AI) {
1111 assert(LV.isSimple() &&
1112 "Unexpected non-simple lvalue during struct expansion.");
1115 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1117 *
this, CAExp, LV.getAddress(), [&](Address EltAddr) {
1118 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1119 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1121 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1122 Address
This = LV.getAddress();
1123 for (
const CXXBaseSpecifier *BS : RExp->Bases) {
1127 false, SourceLocation());
1128 LValue SubLV = MakeAddrLValue(Base, BS->
getType());
1131 ExpandTypeFromArgs(BS->
getType(), SubLV, AI);
1133 for (
auto FD : RExp->Fields) {
1135 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1136 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1139 auto realValue = &*AI++;
1140 auto imagValue = &*AI++;
1141 EmitStoreOfComplex(
ComplexPairTy(realValue, imagValue), LV,
true);
1146 llvm::Value *Arg = &*AI++;
1147 if (LV.isBitField()) {
1153 if (Arg->getType()->isPointerTy()) {
1154 Address
Addr = LV.getAddress();
1155 Arg = Builder.CreateBitCast(Arg,
Addr.getElementType());
1157 EmitStoreOfScalar(Arg, LV);
1162void CodeGenFunction::ExpandTypeToArgs(
1163 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1164 SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos) {
1166 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1171 CallArg(convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1173 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1176 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1179 for (
const CXXBaseSpecifier *BS : RExp->Bases) {
1183 false, SourceLocation());
1187 ExpandTypeToArgs(BS->
getType(), BaseArg, IRFuncTy, IRCallArgs,
1191 LValue LV = MakeAddrLValue(This, Ty);
1192 for (
auto FD : RExp->Fields) {
1194 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1195 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1200 IRCallArgs[IRCallArgPos++] = CV.first;
1201 IRCallArgs[IRCallArgPos++] = CV.second;
1205 assert(RV.isScalar() &&
1206 "Unexpected non-scalar rvalue during struct expansion.");
1209 llvm::Value *
V = RV.getScalarVal();
1210 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1211 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1212 V = Builder.CreateBitCast(
V, IRFuncTy->getParamType(IRCallArgPos));
1214 IRCallArgs[IRCallArgPos++] =
V;
1222 const Twine &Name =
"tmp") {
1235 llvm::StructType *SrcSTy,
1239 if (SrcSTy->getNumElements() == 0)
1248 uint64_t FirstEltSize = CGF.
CGM.
getDataLayout().getTypeStoreSize(FirstElt);
1249 if (FirstEltSize < DstSize &&
1258 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1273 if (Val->getType() == Ty)
1279 return CGF.
Builder.CreateBitCast(Val, Ty,
"coerce.val");
1285 llvm::Type *DestIntTy = Ty;
1289 if (Val->getType() != DestIntTy) {
1291 if (DL.isBigEndian()) {
1294 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1295 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1297 if (SrcSize > DstSize) {
1298 Val = CGF.
Builder.CreateLShr(Val, SrcSize - DstSize,
"coerce.highbits");
1299 Val = CGF.
Builder.CreateTrunc(Val, DestIntTy,
"coerce.val.ii");
1301 Val = CGF.
Builder.CreateZExt(Val, DestIntTy,
"coerce.val.ii");
1302 Val = CGF.
Builder.CreateShl(Val, DstSize - SrcSize,
"coerce.highbits");
1306 Val = CGF.
Builder.CreateIntCast(Val, DestIntTy,
false,
"coerce.val.ii");
1311 Val = CGF.
Builder.CreateIntToPtr(Val, Ty,
"coerce.val.ip");
1318 if (PFPFields.empty())
1321 auto LoadCoercedField = [&](
CharUnits Offset,
1322 llvm::Type *FieldType) -> llvm::Value * {
1327 if (!PFPFields.empty() && PFPFields[0].Offset == Offset) {
1331 FieldVal = CGF.
Builder.CreatePtrToInt(FieldVal, FieldType);
1332 PFPFields.erase(PFPFields.begin());
1349 Val = CGF.
Builder.CreatePtrToInt(Val, Ty);
1353 auto *ET = AT->getElementType();
1357 llvm::Value *Val = llvm::PoisonValue::get(AT);
1358 for (
unsigned Idx = 0; Idx != AT->getNumElements(); ++Idx, Offset += WordSize)
1359 Val = CGF.
Builder.CreateInsertValue(Val, LoadCoercedField(Offset, ET), Idx);
1383 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1385 DstSize.getFixedValue(), CGF);
1400 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1401 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1415 if (
auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1416 if (
auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1419 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
1420 FixedSrcTy->getElementType()->isIntegerTy(8)) {
1421 ScalableDstTy = llvm::ScalableVectorType::get(
1422 FixedSrcTy->getElementType(),
1424 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
1426 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
1428 auto *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
1429 llvm::Value *Result = CGF.
Builder.CreateInsertVector(
1430 ScalableDstTy, PoisonVec, Load, uint64_t(0),
"cast.scalable");
1432 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, Ty));
1433 if (Result->getType() != ScalableDstTy)
1434 Result = CGF.
Builder.CreateBitCast(Result, ScalableDstTy);
1435 if (Result->getType() != Ty)
1436 Result = CGF.
Builder.CreateExtractVector(Ty, Result, uint64_t(0));
1448 llvm::ConstantInt::get(CGF.
IntPtrTy, SrcSize.getKnownMinValue()));
1455 if (PFPFields.empty())
1458 llvm::Type *SrcTy = Src->getType();
1459 auto StoreCoercedField = [&](
CharUnits Offset, llvm::Value *FieldVal) {
1460 if (!PFPFields.empty() && PFPFields[0].Offset == Offset) {
1465 PFPFields.erase(PFPFields.begin());
1485 auto *ET = AT->getElementType();
1489 for (
unsigned i = 0; i != AT->getNumElements(); ++i, Offset += WordSize)
1490 StoreCoercedField(Offset, CGF.
Builder.CreateExtractValue(Src, i));
1496 Address Dst, llvm::TypeSize DstSize,
1497 bool DstIsVolatile) {
1501 llvm::Type *SrcTy = Src->getType();
1502 llvm::TypeSize SrcSize =
CGM.getDataLayout().getTypeAllocSize(SrcTy);
1508 if (llvm::StructType *DstSTy =
1510 assert(!SrcSize.isScalable());
1512 SrcSize.getFixedValue(), *
this);
1519 if (SrcSize.isScalable() || SrcSize <= DstSize) {
1520 if (SrcTy->isIntegerTy() && Dst.
getElementType()->isPointerTy() &&
1524 auto *I =
Builder.CreateStore(Src, Dst, DstIsVolatile);
1526 }
else if (llvm::StructType *STy =
1527 dyn_cast<llvm::StructType>(Src->getType())) {
1530 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1532 llvm::Value *Elt =
Builder.CreateExtractValue(Src, i);
1533 auto *I =
Builder.CreateStore(Elt, EltPtr, DstIsVolatile);
1541 }
else if (SrcTy->isIntegerTy()) {
1543 llvm::Type *DstIntTy =
Builder.getIntNTy(DstSize.getFixedValue() * 8);
1560 Builder.CreateStore(Src, Tmp);
1561 auto *I =
Builder.CreateMemCpy(
1580static std::pair<llvm::Value *, bool>
1582 llvm::ScalableVectorType *FromTy, llvm::Value *
V,
1583 StringRef Name =
"") {
1586 if (FromTy->getElementType()->isIntegerTy(1) &&
1587 ToTy->getElementType() == CGF.
Builder.getInt8Ty()) {
1588 if (!FromTy->getElementCount().isKnownMultipleOf(8)) {
1589 FromTy = llvm::ScalableVectorType::get(
1590 FromTy->getElementType(),
1591 llvm::alignTo<8>(FromTy->getElementCount().getKnownMinValue()));
1592 llvm::Value *ZeroVec = llvm::Constant::getNullValue(FromTy);
1593 V = CGF.
Builder.CreateInsertVector(FromTy, ZeroVec,
V, uint64_t(0));
1595 FromTy = llvm::ScalableVectorType::get(
1596 ToTy->getElementType(),
1597 FromTy->getElementCount().getKnownMinValue() / 8);
1598 V = CGF.
Builder.CreateBitCast(
V, FromTy);
1600 if (FromTy->getElementType() == ToTy->getElementType()) {
1601 V->setName(Name +
".coerce");
1602 V = CGF.
Builder.CreateExtractVector(ToTy,
V, uint64_t(0),
"cast.fixed");
1612class ClangToLLVMArgMapping {
1613 static const unsigned InvalidIndex = ~0U;
1614 unsigned InallocaArgNo;
1616 unsigned TotalIRArgs;
1620 unsigned PaddingArgIndex;
1623 unsigned FirstArgIndex;
1624 unsigned NumberOfArgs;
1627 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1631 SmallVector<IRArgs, 8> ArgInfo;
1634 ClangToLLVMArgMapping(
const ASTContext &Context,
const CGFunctionInfo &FI,
1635 bool OnlyRequiredArgs =
false)
1636 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1637 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1638 construct(Context, FI, OnlyRequiredArgs);
1641 bool hasInallocaArg()
const {
return InallocaArgNo != InvalidIndex; }
1642 unsigned getInallocaArgNo()
const {
1643 assert(hasInallocaArg());
1644 return InallocaArgNo;
1647 bool hasSRetArg()
const {
return SRetArgNo != InvalidIndex; }
1648 unsigned getSRetArgNo()
const {
1649 assert(hasSRetArg());
1653 unsigned totalIRArgs()
const {
return TotalIRArgs; }
1655 bool hasPaddingArg(
unsigned ArgNo)
const {
1656 assert(ArgNo < ArgInfo.size());
1657 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1659 unsigned getPaddingArgNo(
unsigned ArgNo)
const {
1660 assert(hasPaddingArg(ArgNo));
1661 return ArgInfo[ArgNo].PaddingArgIndex;
1666 std::pair<unsigned, unsigned> getIRArgs(
unsigned ArgNo)
const {
1667 assert(ArgNo < ArgInfo.size());
1668 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1669 ArgInfo[ArgNo].NumberOfArgs);
1673 void construct(
const ASTContext &Context,
const CGFunctionInfo &FI,
1674 bool OnlyRequiredArgs);
1677void ClangToLLVMArgMapping::construct(
const ASTContext &Context,
1678 const CGFunctionInfo &FI,
1679 bool OnlyRequiredArgs) {
1680 unsigned IRArgNo = 0;
1681 bool SwapThisWithSRet =
false;
1686 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1694 QualType ArgType = I->type;
1695 const ABIArgInfo &AI = I->info;
1697 auto &IRArgs = ArgInfo[ArgNo];
1700 IRArgs.PaddingArgIndex = IRArgNo++;
1707 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.
getCoerceToType());
1709 IRArgs.NumberOfArgs = STy->getNumElements();
1711 IRArgs.NumberOfArgs = 1;
1717 IRArgs.NumberOfArgs = 1;
1722 IRArgs.NumberOfArgs = 0;
1732 if (IRArgs.NumberOfArgs > 0) {
1733 IRArgs.FirstArgIndex = IRArgNo;
1734 IRArgNo += IRArgs.NumberOfArgs;
1739 if (IRArgNo == 1 && SwapThisWithSRet)
1742 assert(ArgNo == ArgInfo.size());
1745 InallocaArgNo = IRArgNo++;
1747 TotalIRArgs = IRArgNo;
1755 return RI.
isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1770 switch (BT->getKind()) {
1773 case BuiltinType::Float:
1775 case BuiltinType::Double:
1777 case BuiltinType::LongDouble:
1788 if (BT->getKind() == BuiltinType::LongDouble)
1789 return getTarget().useObjCFP2RetForComplexLongDouble();
1803 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1805 assert(Inserted &&
"Recursively being processed?");
1807 llvm::Type *resultType =
nullptr;
1812 llvm_unreachable(
"Invalid ABI kind for return argument");
1824 unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
1825 resultType = llvm::PointerType::get(
getLLVMContext(), addressSpace);
1841 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI,
true);
1845 if (IRFunctionArgs.hasSRetArg()) {
1846 ArgTypes[IRFunctionArgs.getSRetArgNo()] = llvm::PointerType::get(
1851 if (IRFunctionArgs.hasInallocaArg())
1852 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1859 for (; it != ie; ++it, ++ArgNo) {
1863 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1864 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1867 unsigned FirstIRArg, NumIRArgs;
1868 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1873 assert(NumIRArgs == 0);
1877 assert(NumIRArgs == 1);
1879 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1883 assert(NumIRArgs == 1);
1884 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1893 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1895 assert(NumIRArgs == st->getNumElements());
1896 for (
unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1897 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1899 assert(NumIRArgs == 1);
1900 ArgTypes[FirstIRArg] = argType;
1906 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1908 *ArgTypesIter++ = EltTy;
1910 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1915 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1917 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1922 bool Erased = FunctionsBeingProcessed.erase(&FI);
1924 assert(Erased &&
"Not in set?");
1926 return llvm::FunctionType::get(resultType, ArgTypes, FI.
isVariadic());
1940 llvm::AttrBuilder &FuncAttrs,
1947 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1951 FuncAttrs.addAttribute(
"aarch64_pstate_sm_enabled");
1953 FuncAttrs.addAttribute(
"aarch64_pstate_sm_compatible");
1955 FuncAttrs.addAttribute(
"aarch64_za_state_agnostic");
1959 FuncAttrs.addAttribute(
"aarch64_preserves_za");
1961 FuncAttrs.addAttribute(
"aarch64_in_za");
1963 FuncAttrs.addAttribute(
"aarch64_out_za");
1965 FuncAttrs.addAttribute(
"aarch64_inout_za");
1969 FuncAttrs.addAttribute(
"aarch64_preserves_zt0");
1971 FuncAttrs.addAttribute(
"aarch64_in_zt0");
1973 FuncAttrs.addAttribute(
"aarch64_out_zt0");
1975 FuncAttrs.addAttribute(
"aarch64_inout_zt0");
1979 const Decl *Callee) {
1985 for (
const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
1986 AA->getAssumption().split(Attrs,
",");
1989 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1990 llvm::join(Attrs.begin(), Attrs.end(),
","));
1997 if (
const RecordType *RT =
1999 if (
const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
2000 return ClassDecl->hasTrivialDestructor();
2006 const Decl *TargetDecl) {
2012 if (
Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
2016 if (!
Module.getLangOpts().CPlusPlus)
2019 if (
const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
2020 if (FDecl->isExternC())
2022 }
else if (
const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
2024 if (VDecl->isExternC())
2032 return Module.getCodeGenOpts().StrictReturn ||
2033 !
Module.MayDropFunctionReturn(
Module.getContext(), RetTy) ||
2034 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
2041 llvm::DenormalMode FP32DenormalMode,
2042 llvm::AttrBuilder &FuncAttrs) {
2043 llvm::DenormalFPEnv FPEnv(FPDenormalMode, FP32DenormalMode);
2044 if (FPEnv != llvm::DenormalFPEnv::getDefault())
2045 FuncAttrs.addDenormalFPEnvAttr(FPEnv);
2053 llvm::AttrBuilder &FuncAttrs) {
2059 StringRef Name,
bool HasOptnone,
const CodeGenOptions &CodeGenOpts,
2061 llvm::AttrBuilder &FuncAttrs) {
2064 if (CodeGenOpts.OptimizeSize)
2065 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
2066 if (CodeGenOpts.OptimizeSize == 2)
2067 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
2070 if (CodeGenOpts.DisableRedZone)
2071 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
2072 if (CodeGenOpts.IndirectTlsSegRefs)
2073 FuncAttrs.addAttribute(
"indirect-tls-seg-refs");
2074 if (CodeGenOpts.NoImplicitFloat)
2075 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
2077 if (AttrOnCallSite) {
2082 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
2084 FuncAttrs.addAttribute(
"trap-func-name", CodeGenOpts.
TrapFuncName);
2086 switch (CodeGenOpts.getFramePointer()) {
2094 FuncAttrs.addAttribute(
"frame-pointer",
2096 CodeGenOpts.getFramePointer()));
2099 if (CodeGenOpts.LessPreciseFPMAD)
2100 FuncAttrs.addAttribute(
"less-precise-fpmad",
"true");
2102 if (CodeGenOpts.NullPointerIsValid)
2103 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
2106 FuncAttrs.addAttribute(
"no-trapping-math",
"true");
2110 if (LangOpts.NoHonorNaNs)
2111 FuncAttrs.addAttribute(
"no-nans-fp-math",
"true");
2112 if (CodeGenOpts.SoftFloat)
2113 FuncAttrs.addAttribute(
"use-soft-float",
"true");
2114 FuncAttrs.addAttribute(
"stack-protector-buffer-size",
2115 llvm::utostr(CodeGenOpts.SSPBufferSize));
2116 if (LangOpts.NoSignedZero)
2117 FuncAttrs.addAttribute(
"no-signed-zeros-fp-math",
"true");
2120 const std::vector<std::string> &Recips = CodeGenOpts.
Reciprocals;
2121 if (!Recips.empty())
2122 FuncAttrs.addAttribute(
"reciprocal-estimates", llvm::join(Recips,
","));
2126 FuncAttrs.addAttribute(
"prefer-vector-width",
2129 if (CodeGenOpts.StackRealignment)
2130 FuncAttrs.addAttribute(
"stackrealign");
2131 if (CodeGenOpts.Backchain)
2132 FuncAttrs.addAttribute(
"backchain");
2133 if (CodeGenOpts.EnableSegmentedStacks)
2134 FuncAttrs.addAttribute(
"split-stack");
2136 if (CodeGenOpts.SpeculativeLoadHardening)
2137 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2140 switch (CodeGenOpts.getZeroCallUsedRegs()) {
2141 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
2142 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2144 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
2145 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr-arg");
2147 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
2148 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr");
2150 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
2151 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-arg");
2153 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
2154 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used");
2156 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
2157 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr-arg");
2159 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
2160 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr");
2162 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
2163 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-arg");
2165 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
2166 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all");
2177 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2182 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
2183 LangOpts.SYCLIsDevice) {
2184 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2187 if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
2188 FuncAttrs.addAttribute(
"save-reg-params");
2191 StringRef Var,
Value;
2193 FuncAttrs.addAttribute(Var,
Value);
2207 const llvm::Function &F,
2209 auto FFeatures = F.getFnAttribute(
"target-features");
2211 llvm::StringSet<> MergedNames;
2213 MergedFeatures.reserve(TargetOpts.
Features.size());
2215 auto AddUnmergedFeatures = [&](
auto &&FeatureRange) {
2216 for (StringRef
Feature : FeatureRange) {
2220 StringRef Name =
Feature.drop_front(1);
2221 bool Merged = !MergedNames.insert(Name).second;
2223 MergedFeatures.push_back(
Feature);
2227 if (FFeatures.isValid())
2228 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(),
','));
2229 AddUnmergedFeatures(TargetOpts.
Features);
2231 if (!MergedFeatures.empty()) {
2232 llvm::sort(MergedFeatures);
2233 FuncAttr.addAttribute(
"target-features", llvm::join(MergedFeatures,
","));
2240 bool WillInternalize) {
2242 llvm::AttrBuilder FuncAttrs(F.getContext());
2245 if (!TargetOpts.
CPU.empty())
2246 FuncAttrs.addAttribute(
"target-cpu", TargetOpts.
CPU);
2247 if (!TargetOpts.
TuneCPU.empty())
2248 FuncAttrs.addAttribute(
"tune-cpu", TargetOpts.
TuneCPU);
2251 CodeGenOpts, LangOpts,
2254 if (!WillInternalize && F.isInterposable()) {
2259 F.addFnAttrs(FuncAttrs);
2263 llvm::AttributeMask AttrsToRemove;
2267 llvm::DenormalFPEnv MergedFPEnv =
2268 OptsFPEnv.mergeCalleeMode(F.getDenormalFPEnv());
2270 if (MergedFPEnv == llvm::DenormalFPEnv::getDefault()) {
2271 AttrsToRemove.addAttribute(llvm::Attribute::DenormalFPEnv);
2274 FuncAttrs.addDenormalFPEnvAttr(MergedFPEnv);
2277 F.removeFnAttrs(AttrsToRemove);
2281 F.addFnAttrs(FuncAttrs);
2284void CodeGenModule::getTrivialDefaultFunctionAttributes(
2285 StringRef Name,
bool HasOptnone,
bool AttrOnCallSite,
2286 llvm::AttrBuilder &FuncAttrs) {
2288 getLangOpts(), AttrOnCallSite,
2292void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2294 bool AttrOnCallSite,
2295 llvm::AttrBuilder &FuncAttrs) {
2299 if (!AttrOnCallSite)
2305 if (!AttrOnCallSite)
2310 llvm::AttrBuilder &attrs) {
2311 getDefaultFunctionAttributes(
"",
false,
2313 GetCPUAndFeaturesAttributes(
GlobalDecl(), attrs);
2318 const NoBuiltinAttr *NBA =
nullptr) {
2319 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2321 AttributeName +=
"no-builtin-";
2322 AttributeName += BuiltinName;
2323 FuncAttrs.addAttribute(AttributeName);
2327 if (LangOpts.NoBuiltin) {
2329 FuncAttrs.addAttribute(
"no-builtins");
2343 if (llvm::is_contained(NBA->builtinNames(),
"*")) {
2344 FuncAttrs.addAttribute(
"no-builtins");
2349 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2353 const llvm::DataLayout &DL,
const ABIArgInfo &AI,
2354 bool CheckCoerce =
true) {
2361 if (!DL.typeSizeEqualsStoreSize(Ty))
2368 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2369 DL.getTypeSizeInBits(Ty)))
2393 if (
const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2395 if (
const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2404 unsigned NumRequiredArgs,
unsigned ArgNo) {
2405 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2410 if (ArgNo >= NumRequiredArgs)
2414 if (ArgNo < FD->getNumParams()) {
2415 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2416 if (Param && Param->hasAttr<MaybeUndefAttr>())
2433 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2436 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2438 llvm::all_of(ST->elements(),
2439 llvm::AttributeFuncs::isNoFPClassCompatibleType);
2447 llvm::FPClassTest Mask = llvm::fcNone;
2448 if (LangOpts.NoHonorInfs)
2449 Mask |= llvm::fcInf;
2450 if (LangOpts.NoHonorNaNs)
2451 Mask |= llvm::fcNan;
2457 llvm::AttributeList &Attrs) {
2458 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2459 Attrs = Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Memory);
2460 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
2486 llvm::AttributeList &AttrList,
2488 bool AttrOnCallSite,
bool IsThunk) {
2496 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2498 FuncAttrs.addAttribute(
"cmse_nonsecure_call");
2509 bool HasOptnone =
false;
2511 const NoBuiltinAttr *NBA =
nullptr;
2515 std::optional<llvm::Attribute::AttrKind> MemAttrForPtrArgs;
2516 bool AddedPotentialArgAccess =
false;
2517 auto AddPotentialArgAccess = [&]() {
2518 AddedPotentialArgAccess =
true;
2519 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2521 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2522 llvm::MemoryEffects::argMemOnly());
2529 if (TargetDecl->
hasAttr<ReturnsTwiceAttr>())
2530 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2531 if (TargetDecl->
hasAttr<NoThrowAttr>())
2532 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2533 if (TargetDecl->
hasAttr<NoReturnAttr>())
2534 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2535 if (TargetDecl->
hasAttr<ColdAttr>())
2536 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2537 if (TargetDecl->
hasAttr<HotAttr>())
2538 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2539 if (TargetDecl->
hasAttr<NoDuplicateAttr>())
2540 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2541 if (TargetDecl->
hasAttr<ConvergentAttr>())
2542 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2544 if (
const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2547 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2549 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2551 (Kind == OO_New || Kind == OO_Array_New))
2552 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2555 const bool IsVirtualCall = MD && MD->
isVirtual();
2558 if (!(AttrOnCallSite && IsVirtualCall)) {
2559 if (Fn->isNoReturn())
2560 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2561 NBA = Fn->getAttr<NoBuiltinAttr>();
2568 if (AttrOnCallSite && TargetDecl->
hasAttr<NoMergeAttr>())
2569 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2573 if (TargetDecl->
hasAttr<ConstAttr>()) {
2574 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2575 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2578 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2579 MemAttrForPtrArgs = llvm::Attribute::ReadNone;
2580 }
else if (TargetDecl->
hasAttr<PureAttr>()) {
2581 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2582 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2584 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2585 MemAttrForPtrArgs = llvm::Attribute::ReadOnly;
2586 }
else if (TargetDecl->
hasAttr<NoAliasAttr>()) {
2587 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2588 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2590 if (
const auto *RA = TargetDecl->
getAttr<RestrictAttr>();
2591 RA && RA->getDeallocator() ==
nullptr)
2592 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2593 if (TargetDecl->
hasAttr<ReturnsNonNullAttr>() &&
2594 !CodeGenOpts.NullPointerIsValid)
2595 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2596 if (TargetDecl->
hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2597 FuncAttrs.addAttribute(
"no_caller_saved_registers");
2598 if (TargetDecl->
hasAttr<AnyX86NoCfCheckAttr>())
2599 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2600 if (TargetDecl->
hasAttr<LeafAttr>())
2601 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2602 if (TargetDecl->
hasAttr<BPFFastCallAttr>())
2603 FuncAttrs.addAttribute(
"bpf_fastcall");
2605 HasOptnone = TargetDecl->
hasAttr<OptimizeNoneAttr>();
2606 if (
auto *AllocSize = TargetDecl->
getAttr<AllocSizeAttr>()) {
2607 std::optional<unsigned> NumElemsParam;
2608 if (AllocSize->getNumElemsParam().isValid())
2609 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2610 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2620 FuncAttrs.addAttribute(
"uniform-work-group-size");
2622 if (TargetDecl->
hasAttr<ArmLocallyStreamingAttr>())
2623 FuncAttrs.addAttribute(
"aarch64_pstate_sm_body");
2625 if (
auto *ModularFormat = TargetDecl->
getAttr<ModularFormatAttr>()) {
2626 FormatAttr *Format = TargetDecl->
getAttr<FormatAttr>();
2627 StringRef
Type = Format->getType()->getName();
2628 std::string FormatIdx = std::to_string(Format->getFormatIdx());
2629 std::string FirstArg = std::to_string(Format->getFirstArg());
2631 Type, FormatIdx, FirstArg,
2632 ModularFormat->getModularImplFn()->getName(),
2633 ModularFormat->getImplName()};
2634 llvm::append_range(Args, ModularFormat->aspects());
2635 FuncAttrs.addAttribute(
"modular-format", llvm::join(Args,
","));
2648 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2653 if (TargetDecl->
hasAttr<NoSpeculativeLoadHardeningAttr>())
2654 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2655 if (TargetDecl->
hasAttr<SpeculativeLoadHardeningAttr>())
2656 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2657 if (TargetDecl->
hasAttr<NoSplitStackAttr>())
2658 FuncAttrs.removeAttribute(
"split-stack");
2659 if (TargetDecl->
hasAttr<ZeroCallUsedRegsAttr>()) {
2662 TargetDecl->
getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2663 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2664 FuncAttrs.addAttribute(
2665 "zero-call-used-regs",
2666 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2673 if (CodeGenOpts.NoPLT) {
2674 if (
auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2675 if (!Fn->isDefined() && !AttrOnCallSite) {
2676 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2681 if (TargetDecl->
hasAttr<NoConvergentAttr>())
2682 FuncAttrs.removeAttribute(llvm::Attribute::Convergent);
2687 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2688 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2689 if (!FD->isExternallyVisible())
2690 FuncAttrs.addAttribute(
"sample-profile-suffix-elision-policy",
2697 if (!AttrOnCallSite) {
2698 if (TargetDecl && TargetDecl->
hasAttr<CmseNSEntryAttr>())
2699 FuncAttrs.addAttribute(
"cmse_nonsecure_entry");
2702 auto shouldDisableTailCalls = [&] {
2704 if (CodeGenOpts.DisableTailCalls)
2710 if (TargetDecl->
hasAttr<DisableTailCallsAttr>() ||
2711 TargetDecl->
hasAttr<AnyX86InterruptAttr>())
2714 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2715 if (
const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2716 if (!BD->doesNotEscape())
2722 if (shouldDisableTailCalls())
2723 FuncAttrs.addAttribute(
"disable-tail-calls",
"true");
2728 static const llvm::StringSet<> ReturnsTwiceFn{
2729 "_setjmpex",
"setjmp",
"_setjmp",
"vfork",
2730 "sigsetjmp",
"__sigsetjmp",
"savectx",
"getcontext"};
2731 if (ReturnsTwiceFn.contains(Name))
2732 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2736 GetCPUAndFeaturesAttributes(CalleeInfo.
getCalleeDecl(), FuncAttrs);
2739 if (!MSHotPatchFunctions.empty()) {
2740 bool IsHotPatched = llvm::binary_search(MSHotPatchFunctions, Name);
2742 FuncAttrs.addAttribute(
"marked_for_windows_hot_patching");
2747 if (CodeGenOpts.isLoaderReplaceableFunctionName(Name))
2748 FuncAttrs.addAttribute(
"loader-replaceable");
2751 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI);
2758 if (CodeGenOpts.EnableNoundefAttrs &&
2762 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2768 RetAttrs.addAttribute(llvm::Attribute::SExt);
2770 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2772 RetAttrs.addAttribute(llvm::Attribute::NoExt);
2777 RetAttrs.addAttribute(llvm::Attribute::InReg);
2789 AddPotentialArgAccess();
2798 llvm_unreachable(
"Invalid ABI kind for return argument");
2806 RetAttrs.addDereferenceableAttr(
2808 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2809 !CodeGenOpts.NullPointerIsValid)
2810 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2812 llvm::Align Alignment =
2814 RetAttrs.addAlignmentAttr(Alignment);
2819 bool hasUsedSRet =
false;
2823 if (IRFunctionArgs.hasSRetArg()) {
2825 SRETAttrs.addStructRetAttr(
getTypes().ConvertTypeForMem(RetTy));
2826 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2827 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2830 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2832 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2837 if (IRFunctionArgs.hasInallocaArg()) {
2840 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2850 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2852 assert(IRArgs.second == 1 &&
"Expected only a single `this` pointer.");
2858 if (!CodeGenOpts.NullPointerIsValid &&
2860 Attrs.addAttribute(llvm::Attribute::NonNull);
2867 Attrs.addDereferenceableOrNullAttr(
2873 llvm::Align Alignment =
2877 Attrs.addAlignmentAttr(Alignment);
2879 const auto *DD = dyn_cast_if_present<CXXDestructorDecl>(
2887 CodeGenOpts.StrictLifetimes) {
2889 dyn_cast<CXXRecordDecl>(DD->getDeclContext());
2896 Attrs.addDeadOnReturnAttr(llvm::DeadOnReturnInfo(
2897 Context.getASTRecordLayout(ClassDecl).getDataSize().getQuantity()));
2900 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(
getLLVMContext(), Attrs);
2905 I != E; ++I, ++ArgNo) {
2911 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2913 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2916 .addAttribute(llvm::Attribute::InReg));
2921 if (CodeGenOpts.EnableNoundefAttrs &&
2923 Attrs.addAttribute(llvm::Attribute::NoUndef);
2932 Attrs.addAttribute(llvm::Attribute::SExt);
2934 Attrs.addAttribute(llvm::Attribute::ZExt);
2936 Attrs.addAttribute(llvm::Attribute::NoExt);
2941 Attrs.addAttribute(llvm::Attribute::Nest);
2943 Attrs.addAttribute(llvm::Attribute::InReg);
2944 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.
getDirectAlign()));
2951 Attrs.addAttribute(llvm::Attribute::InReg);
2963 Attrs.addByValAttr(
getTypes().ConvertTypeForMem(ParamType));
2971 Attrs.addDeadOnReturnAttr(llvm::DeadOnReturnInfo());
2976 if (CodeGenOpts.PassByValueIsNoAlias &&
Decl &&
2977 Decl->getArgPassingRestrictions() ==
2981 Attrs.addAttribute(llvm::Attribute::NoAlias);
3006 AddPotentialArgAccess();
3011 Attrs.addByRefAttr(
getTypes().ConvertTypeForMem(ParamType));
3022 AddPotentialArgAccess();
3030 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
3031 !CodeGenOpts.NullPointerIsValid)
3032 Attrs.addAttribute(llvm::Attribute::NonNull);
3034 llvm::Align Alignment =
3036 Attrs.addAlignmentAttr(Alignment);
3045 DeviceKernelAttr::isOpenCLSpelling(
3046 TargetDecl->
getAttr<DeviceKernelAttr>()) &&
3050 llvm::Align Alignment =
3052 Attrs.addAlignmentAttr(Alignment);
3059 Attrs.addAttribute(llvm::Attribute::NoAlias);
3068 Attrs.addStructRetAttr(
getTypes().ConvertTypeForMem(ParamType));
3073 Attrs.addAttribute(llvm::Attribute::NoAlias);
3077 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
3078 auto info =
getContext().getTypeInfoInChars(PTy);
3079 Attrs.addDereferenceableAttr(info.Width.getQuantity());
3080 Attrs.addAlignmentAttr(info.Align.getAsAlign());
3086 Attrs.addAttribute(llvm::Attribute::SwiftError);
3090 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
3094 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
3099 Attrs.addCapturesAttr(llvm::CaptureInfo::none());
3101 if (Attrs.hasAttributes()) {
3102 unsigned FirstIRArg, NumIRArgs;
3103 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3104 for (
unsigned i = 0; i < NumIRArgs; i++)
3105 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
3112 if (AddedPotentialArgAccess && MemAttrForPtrArgs) {
3116 I != E; ++I, ++ArgNo) {
3117 if (I->info.isDirect() || I->info.isExpand() ||
3118 I->info.isCoerceAndExpand()) {
3119 unsigned FirstIRArg, NumIRArgs;
3120 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3121 for (
unsigned i = FirstIRArg; i < FirstIRArg + NumIRArgs; ++i) {
3131 AttrList = llvm::AttributeList::get(
3140 llvm::Value *value) {
3141 llvm::Type *varType = CGF.
ConvertType(var->getType());
3145 if (value->getType() == varType)
3148 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) &&
3149 "unexpected promotion type");
3152 return CGF.
Builder.CreateTrunc(value, varType,
"arg.unpromote");
3154 return CGF.
Builder.CreateFPCast(value, varType,
"arg.unpromote");
3160 QualType ArgType,
unsigned ArgNo) {
3168 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
3172 if (
auto ParmNNAttr = PVD->
getAttr<NonNullAttr>())
3179 if (NNAttr->isNonNull(ArgNo))
3186struct CopyBackSwiftError final : EHScopeStack::Cleanup {
3189 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(
arg) {}
3190 void Emit(CodeGenFunction &CGF, Flags flags)
override {
3209 if (FD->hasImplicitReturnZero()) {
3210 QualType RetTy = FD->getReturnType().getUnqualifiedType();
3211 llvm::Type *LLVMTy =
CGM.getTypes().ConvertType(RetTy);
3212 llvm::Constant *
Zero = llvm::Constant::getNullValue(LLVMTy);
3220 ClangToLLVMArgMapping IRFunctionArgs(
CGM.getContext(), FI);
3221 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
3226 if (IRFunctionArgs.hasInallocaArg())
3227 ArgStruct =
Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
3231 if (IRFunctionArgs.hasSRetArg()) {
3232 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
3233 AI->setName(
"agg.result");
3234 AI->addAttr(llvm::Attribute::NoAlias);
3241 ArgVals.reserve(Args.size());
3247 assert(FI.
arg_size() == Args.size() &&
3248 "Mismatch between function signature & arguments.");
3251 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e;
3252 ++i, ++info_it, ++ArgNo) {
3265 unsigned FirstIRArg, NumIRArgs;
3266 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3270 assert(NumIRArgs == 0);
3283 assert(NumIRArgs == 1);
3306 llvm::ConstantInt::get(
IntPtrTy, Size.getQuantity()));
3307 ParamAddr = AlignedTemp;
3324 auto AI = Fn->getArg(FirstIRArg);
3332 assert(NumIRArgs == 1);
3334 if (
const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
3337 PVD->getFunctionScopeIndex()) &&
3338 !
CGM.getCodeGenOpts().NullPointerIsValid)
3339 AI->addAttr(llvm::Attribute::NonNull);
3341 QualType OTy = PVD->getOriginalType();
3342 if (
const auto *ArrTy =
getContext().getAsConstantArrayType(OTy)) {
3348 QualType ETy = ArrTy->getElementType();
3349 llvm::Align Alignment =
3350 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3352 .addAlignmentAttr(Alignment));
3353 uint64_t ArrSize = ArrTy->getZExtSize();
3357 Attrs.addDereferenceableAttr(
3358 getContext().getTypeSizeInChars(ETy).getQuantity() *
3360 AI->addAttrs(Attrs);
3361 }
else if (
getContext().getTargetInfo().getNullPointerValue(
3363 !
CGM.getCodeGenOpts().NullPointerIsValid) {
3364 AI->addAttr(llvm::Attribute::NonNull);
3367 }
else if (
const auto *ArrTy =
3373 QualType ETy = ArrTy->getElementType();
3374 llvm::Align Alignment =
3375 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3377 .addAlignmentAttr(Alignment));
3378 if (!
getTypes().getTargetAddressSpace(ETy) &&
3379 !
CGM.getCodeGenOpts().NullPointerIsValid)
3380 AI->addAttr(llvm::Attribute::NonNull);
3385 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3388 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3389 if (AVAttr && !
SanOpts.has(SanitizerKind::Alignment)) {
3393 llvm::ConstantInt *AlignmentCI =
3395 uint64_t AlignmentInt =
3396 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3397 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3398 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3400 .addAlignmentAttr(llvm::Align(AlignmentInt)));
3407 AI->addAttr(llvm::Attribute::NoAlias);
3415 assert(NumIRArgs == 1);
3419 llvm::Value *
V = AI;
3427 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
3428 llvm::Value *incomingErrorValue =
Builder.CreateLoad(arg);
3429 Builder.CreateStore(incomingErrorValue, temp);
3450 if (
V->getType() != LTy)
3461 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(
ConvertType(Ty))) {
3462 llvm::Value *ArgVal = Fn->getArg(FirstIRArg);
3463 if (
auto *VecTyFrom =
3464 dyn_cast<llvm::ScalableVectorType>(ArgVal->getType())) {
3466 *
this, VecTyTo, VecTyFrom, ArgVal, Arg->
getName());
3468 assert(NumIRArgs == 1);
3475 llvm::StructType *STy =
3486 STy->getNumElements() > 1) {
3487 llvm::TypeSize StructSize =
CGM.getDataLayout().getTypeAllocSize(STy);
3488 llvm::TypeSize PtrElementSize =
3490 if (StructSize.isScalable()) {
3491 assert(STy->containsHomogeneousScalableVectorTypes() &&
3492 "ABI only supports structure with homogeneous scalable vector "
3494 assert(StructSize == PtrElementSize &&
3495 "Only allow non-fractional movement of structure with"
3496 "homogeneous scalable vector type");
3497 assert(STy->getNumElements() == NumIRArgs);
3499 llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
3500 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3501 auto *AI = Fn->getArg(FirstIRArg + i);
3502 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3504 Builder.CreateInsertValue(LoadedStructValue, AI, i);
3507 Builder.CreateStore(LoadedStructValue, Ptr);
3509 uint64_t SrcSize = StructSize.getFixedValue();
3510 uint64_t DstSize = PtrElementSize.getFixedValue();
3513 if (SrcSize <= DstSize) {
3520 assert(STy->getNumElements() == NumIRArgs);
3521 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3522 auto AI = Fn->getArg(FirstIRArg + i);
3523 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3525 Builder.CreateStore(AI, EltPtr);
3528 if (SrcSize > DstSize) {
3529 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
3541 assert(NumIRArgs == 1);
3542 auto AI = Fn->getArg(FirstIRArg);
3543 AI->setName(Arg->
getName() +
".coerce");
3546 llvm::TypeSize::getFixed(
3547 getContext().getTypeSizeInChars(Ty).getQuantity() -
3572 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
3576 unsigned argIndex = FirstIRArg;
3577 unsigned unpaddedIndex = 0;
3578 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3579 llvm::Type *eltType = coercionType->getElementType(i);
3583 auto eltAddr =
Builder.CreateStructGEP(alloca, i);
3584 llvm::Value *elt = Fn->getArg(argIndex++);
3586 auto paramType = unpaddedStruct
3587 ? unpaddedStruct->getElementType(unpaddedIndex++)
3588 : unpaddedCoercionType;
3590 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(eltType)) {
3591 if (
auto *VecTyFrom = dyn_cast<llvm::ScalableVectorType>(paramType)) {
3594 *
this, VecTyTo, VecTyFrom, elt, elt->getName());
3595 assert(Extracted &&
"Unexpected scalable to fixed vector coercion");
3598 Builder.CreateStore(elt, eltAddr);
3600 assert(argIndex == FirstIRArg + NumIRArgs);
3612 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3613 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3614 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3615 for (
unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3616 auto AI = Fn->getArg(FirstIRArg + i);
3617 AI->setName(Arg->
getName() +
"." + Twine(i));
3623 auto *AI = Fn->getArg(FirstIRArg);
3624 AI->setName(Arg->
getName() +
".target_coerce");
3628 CGM.getABIInfo().createCoercedStore(AI, Ptr, ArgI,
false, *
this);
3642 assert(NumIRArgs == 0);
3654 if (
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3655 for (
int I = Args.size() - 1; I >= 0; --I)
3658 for (
unsigned I = 0, E = Args.size(); I != E; ++I)
3664 while (insn->use_empty()) {
3665 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3671 bitcast->eraseFromParent();
3677 llvm::Value *result) {
3679 llvm::BasicBlock *BB = CGF.
Builder.GetInsertBlock();
3682 if (&BB->back() != result)
3685 llvm::Type *resultType = result->getType();
3694 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3700 if (generator->getNextNode() != bitcast)
3703 InstsToKill.push_back(bitcast);
3710 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3714 bool doRetainAutorelease;
3717 doRetainAutorelease =
true;
3718 }
else if (call->getCalledOperand() ==
3720 doRetainAutorelease =
false;
3728 llvm::Instruction *prev = call->getPrevNode();
3731 prev = prev->getPrevNode();
3737 InstsToKill.push_back(prev);
3743 result = call->getArgOperand(0);
3744 InstsToKill.push_back(call);
3748 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3749 if (!bitcast->hasOneUse())
3751 InstsToKill.push_back(bitcast);
3752 result = bitcast->getOperand(0);
3756 for (
auto *I : InstsToKill)
3757 I->eraseFromParent();
3760 if (doRetainAutorelease)
3764 return CGF.
Builder.CreateBitCast(result, resultType);
3769 llvm::Value *result) {
3772 dyn_cast_or_null<ObjCMethodDecl>(CGF.
CurCodeDecl);
3781 llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
3782 if (!retainCall || retainCall->getCalledOperand() !=
3787 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3788 llvm::LoadInst *load =
3789 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3790 if (!load || load->isAtomic() || load->isVolatile() ||
3797 llvm::Type *resultType = result->getType();
3799 assert(retainCall->use_empty());
3800 retainCall->eraseFromParent();
3803 return CGF.
Builder.CreateBitCast(load, resultType);
3810 llvm::Value *result) {
3833 auto GetStoreIfValid = [&CGF,
3834 ReturnValuePtr](llvm::User *
U) -> llvm::StoreInst * {
3835 auto *SI = dyn_cast<llvm::StoreInst>(
U);
3836 if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
3842 assert(!SI->isAtomic() &&
3850 if (!ReturnValuePtr->hasOneUse()) {
3851 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3857 const llvm::Instruction *LoadIntoFakeUse =
nullptr;
3858 for (llvm::Instruction &I : llvm::reverse(*IP)) {
3862 if (LoadIntoFakeUse == &I)
3866 if (
auto *II = dyn_cast<llvm::IntrinsicInst>(&I)) {
3867 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3870 if (II->getIntrinsicID() == llvm::Intrinsic::fake_use) {
3871 LoadIntoFakeUse = dyn_cast<llvm::Instruction>(II->getArgOperand(0));
3875 return GetStoreIfValid(&I);
3880 llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
3886 llvm::BasicBlock *StoreBB = store->getParent();
3887 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3889 while (IP != StoreBB) {
3890 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3906 int BitWidth,
int CharWidth) {
3907 assert(CharWidth <= 64);
3908 assert(
static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3911 if (BitOffset >= CharWidth) {
3912 Pos += BitOffset / CharWidth;
3913 BitOffset = BitOffset % CharWidth;
3916 const uint64_t
Used = (uint64_t(1) << CharWidth) - 1;
3917 if (BitOffset + BitWidth >= CharWidth) {
3918 Bits[Pos++] |= (
Used << BitOffset) &
Used;
3919 BitWidth -= CharWidth - BitOffset;
3923 while (BitWidth >= CharWidth) {
3925 BitWidth -= CharWidth;
3929 Bits[Pos++] |= (
Used >> (CharWidth - BitWidth)) << BitOffset;
3937 int StorageSize,
int BitOffset,
int BitWidth,
3938 int CharWidth,
bool BigEndian) {
3941 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3944 std::reverse(TmpBits.begin(), TmpBits.end());
3946 for (uint64_t
V : TmpBits)
3947 Bits[StorageOffset++] |=
V;
3950static void setUsedBits(CodeGenModule &, QualType,
int,
3951 SmallVectorImpl<uint64_t> &);
3962 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3993 QualType ETy = Context.getBaseElementType(ATy);
3994 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3998 for (
int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3999 auto Src = TmpBits.begin();
4000 auto Dst = Bits.begin() + Offset + I * Size;
4001 for (
int J = 0; J < Size; ++J)
4014 if (
const auto *ATy = Context.getAsConstantArrayType(QTy))
4017 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
4021 std::fill_n(Bits.begin() + Offset, Size,
4022 (uint64_t(1) << Context.getCharWidth()) - 1);
4026 int Pos,
int Size,
int CharWidth,
4031 for (
auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
4033 Mask = (Mask << CharWidth) | *P;
4035 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
4037 Mask = (Mask << CharWidth) | *--P;
4046 llvm::IntegerType *ITy,
4048 assert(Src->getType() == ITy);
4049 assert(ITy->getScalarSizeInBits() <= 64);
4051 const llvm::DataLayout &DataLayout =
CGM.getDataLayout();
4052 int Size = DataLayout.getTypeStoreSize(ITy);
4056 int CharWidth =
CGM.getContext().getCharWidth();
4060 return Builder.CreateAnd(Src, Mask,
"cmse.clear");
4066 llvm::ArrayType *ATy,
4068 const llvm::DataLayout &DataLayout =
CGM.getDataLayout();
4069 int Size = DataLayout.getTypeStoreSize(ATy);
4074 int CharWidth =
CGM.getContext().getCharWidth();
4076 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
4078 llvm::Value *R = llvm::PoisonValue::get(ATy);
4079 for (
int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
4081 DataLayout.isBigEndian());
4082 MaskIndex += CharsPerElt;
4083 llvm::Value *T0 =
Builder.CreateExtractValue(Src, I);
4084 llvm::Value *T1 =
Builder.CreateAnd(T0, Mask,
"cmse.clear");
4085 R =
Builder.CreateInsertValue(R, T1, I);
4093 uint64_t RetKeyInstructionsSourceAtom) {
4108 auto *I =
Builder.CreateRetVoid();
4109 if (RetKeyInstructionsSourceAtom)
4116 llvm::DebugLoc RetDbgLoc;
4117 llvm::Value *RV =
nullptr;
4127 llvm::Function::arg_iterator EI =
CurFn->arg_end();
4129 llvm::Value *ArgStruct = &*EI;
4130 llvm::Value *SRet =
Builder.CreateStructGEP(
4139 auto AI =
CurFn->arg_begin();
4157 CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
4184 RetDbgLoc = SI->getDebugLoc();
4186 RV = SI->getValueOperand();
4187 SI->eraseFromParent();
4210 if (
auto *FD = dyn_cast<FunctionDecl>(
CurCodeDecl))
4211 RT = FD->getReturnType();
4212 else if (
auto *MD = dyn_cast<ObjCMethodDecl>(
CurCodeDecl))
4213 RT = MD->getReturnType();
4215 RT =
BlockInfo->BlockExpression->getFunctionType()->getReturnType();
4217 llvm_unreachable(
"Unexpected function/method type");
4233 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
4238 unsigned unpaddedIndex = 0;
4239 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4240 auto coercedEltType = coercionType->getElementType(i);
4244 auto eltAddr =
Builder.CreateStructGEP(addr, i);
4247 unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
4248 : unpaddedCoercionType,
4250 results.push_back(elt);
4254 if (results.size() == 1) {
4262 RV = llvm::PoisonValue::get(returnType);
4263 for (
unsigned i = 0, e = results.size(); i != e; ++i) {
4264 RV =
Builder.CreateInsertValue(RV, results[i], i);
4271 RV =
CGM.getABIInfo().createCoercedLoad(
V, RetAI, *
this);
4276 llvm_unreachable(
"Invalid ABI kind for return argument");
4279 llvm::Instruction *Ret;
4285 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
4292 Ret =
Builder.CreateRetVoid();
4296 Ret->setDebugLoc(std::move(RetDbgLoc));
4298 llvm::Value *Backup = RV ? Ret->getOperand(0) :
nullptr;
4299 if (RetKeyInstructionsSourceAtom)
4315 ReturnsNonNullAttr *RetNNAttr =
nullptr;
4316 if (
SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
4317 RetNNAttr =
CurCodeDecl->getAttr<ReturnsNonNullAttr>();
4319 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
4327 assert(!requiresReturnValueNullabilityCheck() &&
4328 "Cannot check nullability and the nonnull attribute");
4329 AttrLoc = RetNNAttr->getLocation();
4330 CheckKind = SanitizerKind::SO_ReturnsNonnullAttribute;
4331 Handler = SanitizerHandler::NonnullReturn;
4333 if (
auto *DD = dyn_cast<DeclaratorDecl>(
CurCodeDecl))
4334 if (
auto *TSI = DD->getTypeSourceInfo())
4336 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
4337 CheckKind = SanitizerKind::SO_NullabilityReturn;
4338 Handler = SanitizerHandler::NullabilityReturn;
4347 llvm::Value *SLocPtr =
Builder.CreateLoad(ReturnLocation,
"return.sloc.load");
4348 llvm::Value *CanNullCheck =
Builder.CreateIsNotNull(SLocPtr);
4349 if (requiresReturnValueNullabilityCheck())
4351 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
4352 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
4358 llvm::Value *DynamicData[] = {SLocPtr};
4359 EmitCheck(std::make_pair(
Cond, CheckKind), Handler, StaticData, DynamicData);
4378 llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.
getLLVMContext());
4379 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
4404 if (
type->isReferenceType()) {
4413 param->
hasAttr<NSConsumedAttr>() &&
type->isObjCRetainableType()) {
4414 llvm::Value *ptr =
Builder.CreateLoad(local);
4417 Builder.CreateStore(null, local);
4428 type->castAsRecordDecl()->isParamDestroyedInCallee() &&
4433 "cleanup for callee-destructed param not recorded");
4435 llvm::Instruction *isActive =
Builder.CreateUnreachable();
4441 return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
4451 const LValue &srcLV = writeback.
Source;
4452 Address srcAddr = srcLV.getAddress();
4454 "shouldn't have writeback for provably null argument");
4462 llvm::BasicBlock *contBB =
nullptr;
4468 if (!provablyNonNull) {
4473 CGF.
Builder.CreateCondBr(isNull, contBB, writebackBB);
4482 "icr.writeback-cast");
4491 if (writeback.
ToUse) {
4516 if (!provablyNonNull)
4525 for (
const auto &I : llvm::reverse(Cleanups)) {
4527 I.IsActiveIP->eraseFromParent();
4533 if (uop->getOpcode() == UO_AddrOf)
4534 return uop->getSubExpr();
4559 Address srcAddr = srcLV.getAddress();
4564 llvm::PointerType *destType =
4566 llvm::Type *destElemType =
4593 llvm::BasicBlock *contBB =
nullptr;
4594 llvm::BasicBlock *originBB =
nullptr;
4597 llvm::Value *finalArgument;
4601 if (provablyNonNull) {
4606 finalArgument = CGF.
Builder.CreateSelect(
4607 isNull, llvm::ConstantPointerNull::get(destType),
4613 originBB = CGF.
Builder.GetInsertBlock();
4616 CGF.
Builder.CreateCondBr(isNull, contBB, copyBB);
4618 condEval.
begin(CGF);
4622 llvm::Value *valueToUse =
nullptr;
4630 src = CGF.
Builder.CreateBitCast(src, destElemType,
"icr.cast");
4647 if (shouldCopy && !provablyNonNull) {
4648 llvm::BasicBlock *copyBB = CGF.
Builder.GetInsertBlock();
4653 llvm::PHINode *phiToUse =
4654 CGF.
Builder.CreatePHI(valueToUse->getType(), 2,
"icr.to-use");
4655 phiToUse->addIncoming(valueToUse, copyBB);
4656 phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),
4658 valueToUse = phiToUse;
4672 StackBase = CGF.
Builder.CreateStackSave(
"inalloca.save");
4678 CGF.
Builder.CreateStackRestore(StackBase);
4685 if (!AC.
getDecl() || !(
SanOpts.has(SanitizerKind::NonnullAttribute) ||
4686 SanOpts.has(SanitizerKind::NullabilityArg)))
4691 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4694 const NonNullAttr *NNAttr =
nullptr;
4695 if (
SanOpts.has(SanitizerKind::NonnullAttribute))
4698 bool CanCheckNullability =
false;
4699 if (
SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
4700 !PVD->getType()->isRecordType()) {
4701 auto Nullability = PVD->getType()->getNullability();
4702 CanCheckNullability = Nullability &&
4704 PVD->getTypeSourceInfo();
4707 if (!NNAttr && !CanCheckNullability)
4714 AttrLoc = NNAttr->getLocation();
4715 CheckKind = SanitizerKind::SO_NonnullAttribute;
4716 Handler = SanitizerHandler::NonnullArg;
4718 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4719 CheckKind = SanitizerKind::SO_NullabilityArg;
4720 Handler = SanitizerHandler::NullabilityArg;
4725 llvm::Constant *StaticData[] = {
4728 llvm::ConstantInt::get(
Int32Ty, ArgNo + 1),
4730 EmitCheck(std::make_pair(
Cond, CheckKind), Handler, StaticData, {});
4736 if (!AC.
getDecl() || !(
SanOpts.has(SanitizerKind::NonnullAttribute) ||
4737 SanOpts.has(SanitizerKind::NullabilityArg)))
4756 return llvm::any_of(ArgTypes, [&](
QualType Ty) {
4767 return classDecl->getTypeParamListAsWritten();
4771 return catDecl->getTypeParamList();
4781 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4785 assert((ParamsToSkip == 0 ||
Prototype.P) &&
4786 "Can't skip parameters if type info is not provided");
4796 bool IsVariadic =
false;
4798 const auto *MD = dyn_cast<const ObjCMethodDecl *>(
Prototype.P);
4800 IsVariadic = MD->isVariadic();
4802 MD,
CGM.getTarget().getTriple().isOSWindows());
4803 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4804 MD->param_type_end());
4807 IsVariadic = FPT->isVariadic();
4808 ExplicitCC = FPT->getExtInfo().getCC();
4809 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4810 FPT->param_type_end());
4818 assert(Arg != ArgRange.end() &&
"Running over edge of argument list!");
4820 QualType ArgTy = (*Arg)->getType();
4821 if (
const auto *OBT = ParamTy->
getAs<OverflowBehaviorType>())
4822 ParamTy = OBT->getUnderlyingType();
4823 if (
const auto *OBT = ArgTy->
getAs<OverflowBehaviorType>())
4824 ArgTy = OBT->getUnderlyingType();
4827 getContext().getCanonicalType(ParamTy).getTypePtr() ==
4828 getContext().getCanonicalType(ArgTy).getTypePtr()) &&
4829 "type mismatch in call argument!");
4835 assert((Arg == ArgRange.end() || IsVariadic) &&
4836 "Extra arguments in non-variadic function!");
4841 for (
auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4842 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4843 assert((
int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4851 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4855 auto MaybeEmitImplicitObjectSize = [&](
unsigned I,
const Expr *Arg,
4864 auto SizeTy = Context.getSizeType();
4866 assert(EmittedArg.getScalarVal() &&
"We emitted nothing for the arg?");
4867 llvm::Value *
V = evaluateOrEmitBuiltinObjectSize(
4868 Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic());
4873 std::swap(Args.back(), *(&Args.back() - 1));
4878 assert(
getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4879 "inalloca only supported on x86");
4884 size_t CallArgsStart = Args.size();
4885 for (
unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4886 unsigned Idx = LeftToRight ? I : E - I - 1;
4888 unsigned InitialArgSize = Args.size();
4892 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4896 "Argument and parameter types don't match");
4900 assert(InitialArgSize + 1 == Args.size() &&
4901 "The code below depends on only adding one arg per EmitCallArg");
4902 (void)InitialArgSize;
4905 if (!Args.back().hasLValue()) {
4906 RValue RVArg = Args.back().getKnownRValue();
4908 ParamsToSkip + Idx);
4912 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4919 std::reverse(Args.begin() + CallArgsStart, Args.end());
4928struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4961 if (!HasLV &&
RV.isScalar())
4963 else if (!HasLV &&
RV.isComplex())
4966 auto Addr = HasLV ?
LV.getAddress() :
RV.getAggregateAddress();
4970 HasLV ?
LV.isVolatileQualified()
4971 :
RV.isVolatileQualified());
4983 std::optional<DisableDebugLocationUpdates> Dis;
4987 dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
5001 "reference binding to unmaterialized r-value!");
5013 if (
type->isRecordType() &&
5014 type->castAsRecordDecl()->isParamDestroyedInCallee()) {
5021 bool DestroyedInCallee =
true, NeedsCleanup =
true;
5022 if (
const auto *RD =
type->getAsCXXRecordDecl())
5023 DestroyedInCallee = RD->hasNonTrivialDestructor();
5025 NeedsCleanup =
type.isDestructedType();
5027 if (DestroyedInCallee)
5034 if (DestroyedInCallee && NeedsCleanup) {
5041 llvm::Instruction *IsActive =
5050 !
type->isArrayParameterType() && !
type.isNonTrivialToPrimitiveCopy()) {
5060QualType CodeGenFunction::getVarArgType(
const Expr *Arg) {
5064 if (!getTarget().getTriple().isOSWindows())
5068 getContext().getTypeSize(Arg->
getType()) <
5072 return getContext().getIntPtrType();
5080void CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
5081 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
5082 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
5083 Inst->setMetadata(
"clang.arc.no_objc_arc_exceptions",
5084 CGM.getNoObjCARCExceptionsMetadata());
5090 const llvm::Twine &name) {
5091 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value *>(), name);
5097 ArrayRef<Address> args,
5098 const llvm::Twine &name) {
5099 SmallVector<llvm::Value *, 3> values;
5100 for (
auto arg : args)
5101 values.push_back(
arg.emitRawPointer(*
this));
5102 return EmitNounwindRuntimeCall(callee, values, name);
5107 ArrayRef<llvm::Value *> args,
5108 const llvm::Twine &name) {
5109 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
5110 call->setDoesNotThrow();
5117 const llvm::Twine &name) {
5118 return EmitRuntimeCall(callee, {},
name);
5123SmallVector<llvm::OperandBundleDef, 1>
5132 if (
auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
5133 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
5134 auto IID = CalleeFn->getIntrinsicID();
5135 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
5148 const llvm::Twine &name) {
5149 llvm::CallInst *call = Builder.CreateCall(
5150 callee, args, getBundlesForFunclet(callee.getCallee()), name);
5151 call->setCallingConv(getRuntimeCC());
5153 if (CGM.shouldEmitConvergenceTokens() && call->isConvergent())
5165 llvm::InvokeInst *invoke =
Builder.CreateInvoke(
5167 invoke->setDoesNotReturn();
5170 llvm::CallInst *call =
Builder.CreateCall(callee, args, BundleList);
5171 call->setDoesNotReturn();
5180 const Twine &name) {
5188 const Twine &name) {
5198 const Twine &Name) {
5203 llvm::CallBase *Inst;
5205 Inst =
Builder.CreateCall(Callee, Args, BundleList, Name);
5208 Inst =
Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
5215 if (
CGM.getLangOpts().ObjCAutoRefCount)
5216 AddObjCARCExceptionMetadata(Inst);
5221void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
5223 DeferredReplacements.push_back(
5224 std::make_pair(llvm::WeakTrackingVH(Old),
New));
5231[[nodiscard]] llvm::AttributeList
5232maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
5233 const llvm::AttributeList &Attrs,
5234 llvm::Align NewAlign) {
5235 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
5236 if (CurAlign >= NewAlign)
5238 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
5239 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
5240 .addRetAttribute(Ctx, AlignAttr);
5243template <
typename AlignedAttrTy>
class AbstractAssumeAlignedAttrEmitter {
5248 const AlignedAttrTy *AA =
nullptr;
5250 llvm::Value *Alignment =
nullptr;
5251 llvm::ConstantInt *OffsetCI =
nullptr;
5257 AA = FuncDecl->
getAttr<AlignedAttrTy>();
5262 [[nodiscard]] llvm::AttributeList
5263 TryEmitAsCallSiteAttribute(
const llvm::AttributeList &Attrs) {
5264 if (!AA || OffsetCI || CGF.
SanOpts.
has(SanitizerKind::Alignment))
5266 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
5271 if (!AlignmentCI->getValue().isPowerOf2())
5273 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
5276 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
5284 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
5288 AA->getLocation(), Alignment, OffsetCI);
5294class AssumeAlignedAttrEmitter final
5295 :
public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
5297 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_,
const Decl *FuncDecl)
5298 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5303 if (Expr *Offset = AA->getOffset()) {
5305 if (OffsetCI->isNullValue())
5312class AllocAlignAttrEmitter final
5313 :
public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
5315 AllocAlignAttrEmitter(CodeGenFunction &CGF_,
const Decl *FuncDecl,
5316 const CallArgList &CallArgs)
5317 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5321 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
5330 if (
auto *VT = dyn_cast<llvm::VectorType>(Ty))
5331 return VT->getPrimitiveSizeInBits().getKnownMinValue();
5332 if (
auto *AT = dyn_cast<llvm::ArrayType>(Ty))
5335 unsigned MaxVectorWidth = 0;
5336 if (
auto *ST = dyn_cast<llvm::StructType>(Ty))
5337 for (
auto *I : ST->elements())
5339 return MaxVectorWidth;
5346 llvm::CallBase **callOrInvoke,
bool IsMustTail,
5348 bool IsVirtualFunctionPointerThunk) {
5351 assert(Callee.isOrdinary() || Callee.isVirtual());
5358 llvm::FunctionType *IRFuncTy =
getTypes().GetFunctionType(CallInfo);
5360 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
5361 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5368 if ((TargetDecl->
hasAttr<AlwaysInlineAttr>() &&
5369 (TargetDecl->
hasAttr<TargetAttr>() ||
5373 TargetDecl->
hasAttr<TargetAttr>())))
5380 const FunctionDecl *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl);
5381 CGM.getTargetCodeGenInfo().checkFunctionCallABI(
CGM, Loc, CallerDecl,
5382 CalleeDecl, CallArgs, RetTy);
5389 if (llvm::StructType *ArgStruct = CallInfo.
getArgStruct()) {
5390 const llvm::DataLayout &DL =
CGM.getDataLayout();
5392 llvm::AllocaInst *AI;
5394 IP = IP->getNextNode();
5395 AI =
new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
"argmem",
5401 AI->setAlignment(Align.getAsAlign());
5402 AI->setUsedWithInAlloca(
true);
5403 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5404 ArgMemory =
RawAddress(AI, ArgStruct, Align);
5407 ClangToLLVMArgMapping IRFunctionArgs(
CGM.getContext(), CallInfo);
5413 bool NeedSRetLifetimeEnd =
false;
5419 if ((IsVirtualFunctionPointerThunk || IsMustTail) && RetAI.
isIndirect()) {
5421 IRFunctionArgs.getSRetArgNo(),
5430 if (IRFunctionArgs.hasSRetArg()) {
5443 IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
5461 assert(CallInfo.
arg_size() == CallArgs.size() &&
5462 "Mismatch between function signature & arguments.");
5465 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5466 I != E; ++I, ++info_it, ++ArgNo) {
5470 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5471 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5474 unsigned FirstIRArg, NumIRArgs;
5475 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5477 bool ArgHasMaybeUndefAttr =
5482 assert(NumIRArgs == 0);
5483 assert(
getTarget().getTriple().getArch() == llvm::Triple::x86);
5484 if (I->isAggregate()) {
5486 ? I->getKnownLValue().getAddress()
5487 : I->getKnownRValue().getAggregateAddress();
5488 llvm::Instruction *Placeholder =
5493 CGBuilderTy::InsertPoint IP =
Builder.saveIP();
5494 Builder.SetInsertPoint(Placeholder);
5507 deferPlaceholderReplacement(Placeholder,
Addr.getPointer());
5512 I->Ty,
getContext().getTypeAlignInChars(I->Ty),
5513 "indirect-arg-temp");
5514 I->copyInto(*
this,
Addr);
5523 I->copyInto(*
this,
Addr);
5530 assert(NumIRArgs == 1);
5531 if (I->isAggregate()) {
5541 ? I->getKnownLValue().getAddress()
5542 : I->getKnownRValue().getAggregateAddress();
5544 const llvm::DataLayout *TD = &
CGM.getDataLayout();
5546 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5547 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5548 TD->getAllocaAddrSpace()) &&
5549 "indirect argument must be in alloca address space");
5551 bool NeedCopy =
false;
5552 if (
Addr.getAlignment() < Align &&
5553 llvm::getOrEnforceKnownAlignment(
Addr.emitRawPointer(*
this),
5557 }
else if (I->hasLValue()) {
5558 auto LV = I->getKnownLValue();
5563 if (!isByValOrRef ||
5564 (LV.getAlignment() <
getContext().getTypeAlignInChars(I->Ty))) {
5568 if (isByValOrRef &&
Addr.getType()->getAddressSpace() !=
5577 auto *T = llvm::PointerType::get(
CGM.getLLVMContext(),
5585 if (ArgHasMaybeUndefAttr)
5586 Val =
Builder.CreateFreeze(Val);
5587 IRCallArgs[FirstIRArg] = Val;
5590 }
else if (I->getType()->isArrayParameterType()) {
5596 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5605 if (ArgHasMaybeUndefAttr)
5606 Val =
Builder.CreateFreeze(Val);
5607 IRCallArgs[FirstIRArg] = Val;
5612 CallLifetimeEndAfterCall.emplace_back(AI);
5615 I->copyInto(*
this, AI);
5620 assert(NumIRArgs == 0);
5628 assert(NumIRArgs == 1);
5630 if (!I->isAggregate())
5631 V = I->getKnownRValue().getScalarVal();
5634 I->hasLValue() ? I->getKnownLValue().getAddress()
5635 : I->getKnownRValue().getAggregateAddress());
5641 assert(!swiftErrorTemp.
isValid() &&
"multiple swifterror args");
5645 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
5652 llvm::Value *errorValue =
Builder.CreateLoad(swiftErrorArg);
5653 Builder.CreateStore(errorValue, swiftErrorTemp);
5658 V->getType()->isIntegerTy())
5665 if (FirstIRArg < IRFuncTy->getNumParams() &&
5666 V->getType() != IRFuncTy->getParamType(FirstIRArg)) {
5667 assert(
V->getType()->isPointerTy() &&
"Only pointers can mismatch!");
5671 if (ArgHasMaybeUndefAttr)
5673 IRCallArgs[FirstIRArg] =
V;
5677 llvm::StructType *STy =
5682 if (!I->isAggregate()) {
5684 I->copyInto(*
this, Src);
5686 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5687 : I->getKnownRValue().getAggregateAddress();
5697 llvm::TypeSize SrcTypeSize =
5698 CGM.getDataLayout().getTypeAllocSize(SrcTy);
5699 llvm::TypeSize DstTypeSize =
CGM.getDataLayout().getTypeAllocSize(STy);
5700 if (SrcTypeSize.isScalable()) {
5701 assert(STy->containsHomogeneousScalableVectorTypes() &&
5702 "ABI only supports structure with homogeneous scalable vector "
5704 assert(SrcTypeSize == DstTypeSize &&
5705 "Only allow non-fractional movement of structure with "
5706 "homogeneous scalable vector type");
5707 assert(NumIRArgs == STy->getNumElements());
5709 llvm::Value *StoredStructValue =
5711 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5712 llvm::Value *Extract =
Builder.CreateExtractValue(
5713 StoredStructValue, i, Src.
getName() +
".extract" + Twine(i));
5714 IRCallArgs[FirstIRArg + i] = Extract;
5717 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5718 uint64_t DstSize = DstTypeSize.getFixedValue();
5719 bool HasPFPFields =
getContext().hasPFPFields(I->Ty);
5725 if (HasPFPFields || SrcSize < DstSize) {
5736 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
5742 assert(NumIRArgs == STy->getNumElements());
5743 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5745 llvm::Value *LI =
Builder.CreateLoad(EltPtr);
5746 if (ArgHasMaybeUndefAttr)
5747 LI =
Builder.CreateFreeze(LI);
5748 IRCallArgs[FirstIRArg + i] = LI;
5753 assert(NumIRArgs == 1);
5761 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5766 if (ArgHasMaybeUndefAttr)
5767 Load =
Builder.CreateFreeze(Load);
5768 IRCallArgs[FirstIRArg] = Load;
5776 auto layout =
CGM.getDataLayout().getStructLayout(coercionType);
5778 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
5782 bool NeedLifetimeEnd =
false;
5783 if (I->isAggregate()) {
5784 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5785 : I->getKnownRValue().getAggregateAddress();
5788 RValue RV = I->getKnownRValue();
5792 auto scalarAlign =
CGM.getDataLayout().getPrefTypeAlign(scalarType);
5797 layout->getAlignment(), scalarAlign)),
5799 nullptr, &AllocaAddr);
5807 unsigned IRArgPos = FirstIRArg;
5808 unsigned unpaddedIndex = 0;
5809 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5810 llvm::Type *eltType = coercionType->getElementType(i);
5817 : unpaddedCoercionType,
5819 if (ArgHasMaybeUndefAttr)
5820 elt =
Builder.CreateFreeze(elt);
5821 IRCallArgs[IRArgPos++] = elt;
5823 assert(IRArgPos == FirstIRArg + NumIRArgs);
5825 if (NeedLifetimeEnd)
5831 unsigned IRArgPos = FirstIRArg;
5832 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5833 assert(IRArgPos == FirstIRArg + NumIRArgs);
5839 if (!I->isAggregate()) {
5841 I->copyInto(*
this, Src);
5843 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5844 : I->getKnownRValue().getAggregateAddress();
5850 CGM.getABIInfo().createCoercedLoad(Src, ArgInfo, *
this);
5851 IRCallArgs[FirstIRArg] = Load;
5857 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*
this);
5863 assert(IRFunctionArgs.hasInallocaArg());
5864 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5875 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5876 llvm::Value *Ptr) -> llvm::Function * {
5877 if (!CalleeFT->isVarArg())
5881 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5882 if (CE->getOpcode() == llvm::Instruction::BitCast)
5883 Ptr = CE->getOperand(0);
5886 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5890 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5894 if (OrigFT->isVarArg() ||
5895 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5896 OrigFT->getReturnType() != CalleeFT->getReturnType())
5899 for (
unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5900 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5906 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5908 IRFuncTy = OrigFn->getFunctionType();
5919 for (
unsigned i = 0; i < IRCallArgs.size(); ++i)
5920 LargestVectorWidth = std::max(LargestVectorWidth,
5925 llvm::AttributeList Attrs;
5926 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5931 if (
CallingConv == llvm::CallingConv::X86_VectorCall &&
5932 getTarget().getTriple().isWindowsArm64EC()) {
5933 CGM.Error(Loc,
"__vectorcall calling convention is not currently "
5938 if (FD->hasAttr<StrictFPAttr>())
5940 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5945 if (FD->hasAttr<OptimizeNoneAttr>() &&
getLangOpts().FastMath)
5946 CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(),
5951 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoMerge);
5955 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5960 !
CGM.getTargetCodeGenInfo().wouldInliningViolateFunctionCallABI(
5961 CallerDecl, CalleeDecl))
5963 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5968 Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Convergent);
5977 !(TargetDecl && TargetDecl->
hasAttr<NoInlineAttr>()) &&
5978 !
CGM.getTargetCodeGenInfo().wouldInliningViolateFunctionCallABI(
5979 CallerDecl, CalleeDecl)) {
5981 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5986 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5993 CannotThrow =
false;
6002 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
6004 if (
auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
6005 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
6013 if (NeedSRetLifetimeEnd)
6021 if (
SanOpts.has(SanitizerKind::KCFI) &&
6022 !isa_and_nonnull<FunctionDecl>(TargetDecl))
6029 if (FD->hasAttr<StrictFPAttr>())
6031 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
6033 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*
this, TargetDecl);
6034 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
6036 AllocAlignAttrEmitter AllocAlignAttrEmitter(*
this, TargetDecl, CallArgs);
6037 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
6042 CI =
Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
6045 CI =
Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
6049 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
6050 CI->getCalledFunction()->getName().starts_with(
"_Z4sqrt")) {
6055 if (
CGM.getCodeGenOpts().CallGraphSection) {
6059 else if (
const auto *FPT =
6060 Callee.getAbstractInfo().getCalleeFunctionProtoType())
6064 "Cannot find the callee type to generate callee_type metadata.");
6068 CGM.createCalleeTypeMetadataForIcall(CST, *callOrInvoke);
6075 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(
CurFuncDecl)) {
6076 if (
const auto *A = FD->getAttr<CFGuardAttr>()) {
6077 if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
6078 !CI->getCalledFunction())
6084 CI->setAttributes(Attrs);
6085 CI->setCallingConv(
static_cast<llvm::CallingConv::ID
>(
CallingConv));
6089 if (!CI->getType()->isVoidTy())
6090 CI->setName(
"call");
6092 if (
CGM.shouldEmitConvergenceTokens() && CI->isConvergent())
6093 CI = addConvergenceControlToken(CI);
6096 LargestVectorWidth =
6102 if (!CI->getCalledFunction())
6103 PGO->valueProfile(
Builder, llvm::IPVK_IndirectCallTarget, CI, CalleePtr);
6107 if (
CGM.getLangOpts().ObjCAutoRefCount)
6108 AddObjCARCExceptionMetadata(CI);
6111 bool IsPPC =
getTarget().getTriple().isPPC();
6112 bool IsMIPS =
getTarget().getTriple().isMIPS();
6113 bool HasMips16 =
false;
6116 HasMips16 = TargetOpts.
FeatureMap.lookup(
"mips16");
6118 HasMips16 = llvm::is_contained(TargetOpts.
Features,
"+mips16");
6120 if (llvm::CallInst *
Call = dyn_cast<llvm::CallInst>(CI)) {
6121 if (TargetDecl && TargetDecl->
hasAttr<NotTailCalledAttr>())
6122 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
6123 else if (IsMustTail) {
6126 CGM.getDiags().Report(Loc, diag::err_aix_musttail_unsupported);
6129 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 0;
6130 else if (
Call->isIndirectCall())
6131 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 1;
6132 else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
6137 CGM.addUndefinedGlobalForTailCall(
6140 llvm::GlobalValue::LinkageTypes
Linkage =
CGM.getFunctionLinkage(
6142 if (llvm::GlobalValue::isWeakForLinker(
Linkage) ||
6143 llvm::GlobalValue::isDiscardableIfUnused(
Linkage))
6144 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail)
6152 CGM.getDiags().Report(Loc, diag::err_mips_impossible_musttail) << 0;
6153 else if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
6154 CGM.addUndefinedGlobalForTailCall({FD, Loc});
6156 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
6165 if (TargetDecl && TargetDecl->
hasAttr<ErrorAttr>()) {
6166 llvm::ConstantInt *
Line =
6168 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(
Line);
6170 CI->setMetadata(
"srcloc", MDT);
6178 if (CI->doesNotReturn()) {
6179 if (NeedSRetLifetimeEnd)
6183 if (
SanOpts.has(SanitizerKind::Unreachable)) {
6186 if (
auto *F = CI->getCalledFunction())
6187 F->removeFnAttr(llvm::Attribute::NoReturn);
6188 CI->removeFnAttr(llvm::Attribute::NoReturn);
6192 if (
SanOpts.hasOneOf(SanitizerKind::Address |
6193 SanitizerKind::KernelAddress)) {
6195 llvm::IRBuilder<>::InsertPointGuard IPGuard(
Builder);
6197 auto *FnType = llvm::FunctionType::get(
CGM.VoidTy,
false);
6198 llvm::FunctionCallee Fn =
6199 CGM.CreateRuntimeFunction(FnType,
"__asan_handle_no_return");
6205 Builder.ClearInsertionPoint();
6224 if (Cleanup && Cleanup->isFakeUse()) {
6225 CGBuilderTy::InsertPointGuard IPG(
Builder);
6227 Cleanup->getCleanup()->Emit(*
this, EHScopeStack::Cleanup::Flags());
6228 }
else if (!(Cleanup &&
6229 Cleanup->getCleanup()->isRedundantBeforeReturn())) {
6230 CGM.ErrorUnsupported(
MustTailCall,
"tail call skipping over cleanups");
6233 if (CI->getType()->isVoidTy())
6237 Builder.ClearInsertionPoint();
6243 if (swiftErrorTemp.
isValid()) {
6244 llvm::Value *errorResult =
Builder.CreateLoad(swiftErrorTemp);
6245 Builder.CreateStore(errorResult, swiftErrorArg);
6262 if (IsVirtualFunctionPointerThunk) {
6275 unsigned unpaddedIndex = 0;
6276 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
6277 llvm::Type *eltType = coercionType->getElementType(i);
6281 llvm::Value *elt = CI;
6282 if (requiresExtract)
6283 elt =
Builder.CreateExtractValue(elt, unpaddedIndex++);
6285 assert(unpaddedIndex == 0);
6286 Builder.CreateStore(elt, eltAddr);
6294 if (NeedSRetLifetimeEnd)
6311 llvm::Value *Real =
Builder.CreateExtractValue(CI, 0);
6312 llvm::Value *Imag =
Builder.CreateExtractValue(CI, 1);
6320 llvm::Value *
V = CI;
6321 if (
V->getType() != RetIRTy)
6331 if (
auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
6332 llvm::Value *
V = CI;
6333 if (
auto *ScalableSrcTy =
6334 dyn_cast<llvm::ScalableVectorType>(
V->getType())) {
6335 if (FixedDstTy->getElementType() ==
6336 ScalableSrcTy->getElementType()) {
6337 V =
Builder.CreateExtractVector(FixedDstTy,
V, uint64_t(0),
6347 getContext().getTypeInfoDataSizeInChars(RetTy).Width.getQuantity();
6351 DestIsVolatile =
false;
6352 DestSize =
getContext().getTypeSizeInChars(RetTy).getQuantity();
6362 CI, RetTy, StorePtr,
6376 DestIsVolatile =
false;
6378 CGM.getABIInfo().createCoercedStore(CI, StorePtr, RetAI, DestIsVolatile,
6385 llvm_unreachable(
"Invalid ABI kind for return argument");
6388 llvm_unreachable(
"Unhandled ABIArgInfo::Kind");
6393 if (Ret.isScalar() && TargetDecl) {
6394 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6395 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6401 LifetimeEnd.Emit(*
this, {});
6413 if (CalleeDecl && !CalleeDecl->
hasAttr<NoDebugAttr>() &&
6414 DI->getCallSiteRelatedAttrs() != llvm::DINode::FlagZero) {
6415 CodeGenFunction CalleeCGF(
CGM);
6417 Callee.getAbstractInfo().getCalleeDecl();
6418 CalleeCGF.
CurGD = CalleeGlobalDecl;
6421 DI->EmitFuncDeclForCallSite(
6422 CI, DI->getFunctionType(CalleeDecl, ResTy, Args), CalleeGlobalDecl);
6425 DI->addCallTargetIfVirtual(CalleeDecl, CI);
6451 if (
VE->isMicrosoftABI())
6452 return CGM.getABIInfo().EmitMSVAArg(*
this, VAListAddr, Ty, Slot);
6453 return CGM.getABIInfo().EmitVAArg(*
this, VAListAddr, Ty, Slot);
6458 CGF.disableDebugInfo();
6462 CGF.enableDebugInfo();
static ExtParameterInfoList getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument ArgNo to be non-null.
static CanQualTypeList getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsTargetDefaultMSABI)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from \p TargetOpts and \p F, and sets the result in \p FuncAttr.
static llvm::Value * CreatePFPCoercedLoad(Address Src, QualType SrcFETy, llvm::Type *Ty, CodeGenFunction &CGF)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
SmallVector< CanQualType, 16 > CanQualTypeList
static std::pair< llvm::Value *, bool > CoerceScalableToFixed(CodeGenFunction &CGF, llvm::FixedVectorType *ToTy, llvm::ScalableVectorType *FromTy, llvm::Value *V, StringRef Name="")
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit parameters already stored.
static llvm::Value * CreateCoercedLoad(Address Src, QualType SrcFETy, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
SmallVector< FunctionProtoType::ExtParameterInfo, 16 > ExtParameterInfoList
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of bytes out of it, try to gep into the struct to get at its inner goodness.
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static CanQualTypeList getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and should not simply overwrite any existing attributes.
static bool CreatePFPCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst, CodeGenFunction &CGF)
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
static void addNoBuiltinAttributes(mlir::MLIRContext &ctx, mlir::NamedAttrList &attrs, const LangOptions &langOpts, const NoBuiltinAttr *nba=nullptr)
static void addDenormalModeAttrs(llvm::DenormalMode fpDenormalMode, llvm::DenormalMode fp32DenormalMode, mlir::NamedAttrList &attrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void appendParameterTypes(const CIRGenTypes &cgt, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > fpt)
Adds the formal parameters in FPT to the given prefix.
static const CIRGenFunctionInfo & arrangeFreeFunctionLikeCall(CIRGenTypes &cgt, CIRGenModule &cgm, const CallArgList &args, const FunctionType *fnType)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
#define CC_VLS_CASE(ABI_VLEN)
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
CanQualType getCanonicalSizeType() const
const TargetInfo & getTargetInfo() const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
std::vector< PFPField > findPFPFields(QualType Ty) const
Returns a list of PFP fields for the given type, including subfields in bases or other fields,...
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
This class is used for builtin types like 'int'.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
unsigned getNumBases() const
Retrieves the number of base classes of this class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const
ConstExprIterator const_arg_iterator
Represents a canonical, potentially-qualified type.
static CanQual< Type > CreateUnsafe(QualType Other)
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ TargetSpecific
TargetSpecific - Some argument types are passed as target specific types such as RISC-V's tuple type,...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
unsigned getAddressSpace() const
Return the address space that this address resides in.
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
llvm::StringRef getName() const
Return the IR name of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
const ArgInfo * const_arg_iterator
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr)
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
An abstract representation of regular/ObjC call/message targets.
const ParmVarDecl * getParamDecl(unsigned I) const
const Decl * getDecl() const
unsigned getNumParams() const
bool hasFunctionDecl() const
An object to manage conditionally-evaluated expressions.
void begin(CodeGenFunction &CGF)
void end(CodeGenFunction &CGF)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
Emits a call or invoke to the given noreturn runtime function.
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
void callCStructDestructor(LValue Dst)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom)
See CGDebugInfo::addInstToSpecificSourceAtom.
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
const CodeGen::CGBlockInfo * BlockInfo
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void CreateCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to the destination address from the given source value.
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetInfo & getTarget() const
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
void EmitWritebacks(const CallArgList &Args)
EmitWritebacks - Emit callbacks for function.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
llvm::BasicBlock * getInvokeDest()
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
CGDebugInfo * getDebugInfo()
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
void EmitLifetimeEnd(llvm::Value *Addr)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitAddressOfPFPField(Address RecordPtr, const PFPField &Field)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
llvm::Type * ConvertTypeForMem(QualType T)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc, uint64_t RetKeyInstructionsSourceAtom)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CallExpr * MustTailCall
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
This class organizes the cross-function state that is used while generating LLVM code.
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
ObjCEntrypoints & getObjCEntrypoints() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeDeviceKernelCallerDeclaration(QualType resultType, const FunctionArgList &args)
A device kernel caller function is an offload device entry point function with a target device depend...
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeFunctionDeclaration(const GlobalDecl GD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
A cleanup scope which generates the cleanup blocks lazily.
A saved depth on the scope stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
static void initPointerAuthFnAttributes(const PointerAuthOptions &Opts, llvm::AttrBuilder &FuncAttrs)
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
const FunctionType * getFunctionType(bool BlocksToo=true) const
Looks through the Decl's underlying type to extract a FunctionType when possible.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
bool isZeroLengthBitField() const
Is this a zero-length bit-field?
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
@ SME_AgnosticZAStateMask
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
KernelReferenceKind getKernelReferenceKind() const
CXXDtorType getDtorType() const
const Decl * getDecl() const
This class represents temporary values used to represent inout and out arguments in HLSL.
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
bool isNull() const
Return true if this QualType doesn't point to a type yet.
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
field_iterator field_end() const
bool isParamDestroyedInCallee() const
field_iterator field_begin() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
llvm::StringMap< bool > FeatureMap
The map of which features have been enabled or disabled based on the command line.
bool isIncompleteArrayType() const
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
RecordDecl * castAsRecordDecl() const
bool isMemberPointerType() const
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
const T * getAs() const
Member-template getAs<specific type>'.
bool isNullPtrType() const
bool isRecordType() const
bool isObjCRetainableType() const
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool This(InterpState &S, CodePtr OpPC)
bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isa(CodeGen::Address addr)
static bool classof(const OMPClause *T)
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
nullptr
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_VectorDeleting
Vector deleting dtor.
@ Dtor_Complete
Complete object dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
U cast(CodeGen::Address addr)
@ Struct
The "struct" keyword introduces the elaborated-type-specifier.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
const Expr * WritebackExpr
An Expression (optional) that performs the writeback with any required casting.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::PointerType * VoidPtrTy
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
~DisableDebugLocationUpdates()
DisableDebugLocationUpdates(CodeGenFunction &CGF)
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.