35#include "llvm/ADT/STLExtras.h"
36#include "llvm/ADT/StringExtras.h"
37#include "llvm/Analysis/ValueTracking.h"
38#include "llvm/IR/Assumptions.h"
39#include "llvm/IR/AttributeMask.h"
40#include "llvm/IR/Attributes.h"
41#include "llvm/IR/CallingConv.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DebugInfoMetadata.h"
44#include "llvm/IR/InlineAsm.h"
45#include "llvm/IR/IntrinsicInst.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/Type.h"
48#include "llvm/Transforms/Utils/Local.h"
58 return llvm::CallingConv::C;
60 return llvm::CallingConv::X86_StdCall;
62 return llvm::CallingConv::X86_FastCall;
64 return llvm::CallingConv::X86_RegCall;
66 return llvm::CallingConv::X86_ThisCall;
68 return llvm::CallingConv::Win64;
70 return llvm::CallingConv::X86_64_SysV;
72 return llvm::CallingConv::ARM_AAPCS;
74 return llvm::CallingConv::ARM_AAPCS_VFP;
76 return llvm::CallingConv::Intel_OCL_BI;
79 return llvm::CallingConv::C;
82 return llvm::CallingConv::X86_VectorCall;
84 return llvm::CallingConv::AArch64_VectorCall;
86 return llvm::CallingConv::AArch64_SVE_VectorCall;
88 return llvm::CallingConv::SPIR_FUNC;
90 return CGM.getTargetCodeGenInfo().getDeviceKernelCallingConv();
92 return llvm::CallingConv::PreserveMost;
94 return llvm::CallingConv::PreserveAll;
96 return llvm::CallingConv::Swift;
98 return llvm::CallingConv::SwiftTail;
100 return llvm::CallingConv::M68k_RTD;
102 return llvm::CallingConv::PreserveNone;
106#define CC_VLS_CASE(ABI_VLEN) \
107 case CC_RISCVVLSCall_##ABI_VLEN: \
108 return llvm::CallingConv::RISCV_VLSCall_##ABI_VLEN;
133 RecTy = Context.getCanonicalTagType(RD);
135 RecTy = Context.VoidTy;
140 return Context.getPointerType(RecTy);
173 assert(paramInfos.size() <= prefixArgs);
174 assert(proto->
getNumParams() + prefixArgs <= totalArgs);
176 paramInfos.reserve(totalArgs);
179 paramInfos.resize(prefixArgs);
183 paramInfos.push_back(ParamInfo);
185 if (ParamInfo.hasPassObjectSize())
186 paramInfos.emplace_back();
189 assert(paramInfos.size() <= totalArgs &&
190 "Did we forget to insert pass_object_size args?");
192 paramInfos.resize(totalArgs);
202 if (!FPT->hasExtParameterInfos()) {
203 assert(paramInfos.empty() &&
204 "We have paramInfos, but the prototype doesn't?");
205 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
209 unsigned PrefixSize = prefix.size();
213 prefix.reserve(prefix.size() + FPT->getNumParams());
215 auto ExtInfos = FPT->getExtParameterInfos();
216 assert(ExtInfos.size() == FPT->getNumParams());
217 for (
unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
218 prefix.push_back(FPT->getParamType(I));
219 if (ExtInfos[I].hasPassObjectSize())
244 FTP->getExtInfo(), paramInfos,
Required);
254 return ::arrangeLLVMFunctionInfo(*
this,
false, argTypes,
259 bool IsTargetDefaultMSABI) {
264 if (D->
hasAttr<FastCallAttr>())
270 if (D->
hasAttr<ThisCallAttr>())
273 if (D->
hasAttr<VectorCallAttr>())
279 if (PcsAttr *PCS = D->
getAttr<PcsAttr>())
282 if (D->
hasAttr<AArch64VectorPcsAttr>())
285 if (D->
hasAttr<AArch64SVEPcsAttr>())
288 if (D->
hasAttr<DeviceKernelAttr>())
291 if (D->
hasAttr<IntelOclBiccAttr>())
300 if (D->
hasAttr<PreserveMostAttr>())
303 if (D->
hasAttr<PreserveAllAttr>())
309 if (D->
hasAttr<PreserveNoneAttr>())
312 if (D->
hasAttr<RISCVVectorCCAttr>())
315 if (RISCVVLSCCAttr *PCS = D->
getAttr<RISCVVLSCCAttr>()) {
316 switch (PCS->getVectorWidth()) {
318 llvm_unreachable(
"Invalid RISC-V VLS ABI VLEN");
319#define CC_VLS_CASE(ABI_VLEN) \
321 return CC_RISCVVLSCall_##ABI_VLEN;
356 return ::arrangeLLVMFunctionInfo(
357 *
this,
true, argTypes,
364 if (FD->
hasAttr<CUDAGlobalAttr>()) {
400 !Target.getCXXABI().hasConstructorVariants();
413 bool PassParams =
true;
415 if (
auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
418 if (
auto Inherited = CD->getInheritedConstructor())
430 if (!paramInfos.empty()) {
433 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.
Prefix,
436 paramInfos.append(AddedArgs.
Suffix,
441 (PassParams && MD->isVariadic() ?
RequiredArgs(argTypes.size())
447 ? CGM.getContext().VoidPtrTy
450 argTypes, extInfo, paramInfos, required);
456 for (
auto &arg : args)
464 for (
auto &arg : args)
471 unsigned totalArgs) {
489 unsigned ExtraPrefixArgs,
unsigned ExtraSuffixArgs,
bool PassProtoArgs) {
491 for (
const auto &Arg : args)
492 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
495 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
500 FPT, TotalPrefixArgs + ExtraSuffixArgs)
506 ? CGM.getContext().VoidPtrTy
513 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
520 ArgTypes, Info, ParamInfos,
Required);
529 if (MD->isImplicitObjectMemberFunction())
537 if (DeviceKernelAttr::isOpenCLSpelling(FD->
getAttr<DeviceKernelAttr>()) &&
540 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
548 {}, noProto->getExtInfo(), {},
575 argTys.push_back(Context.getCanonicalParamType(receiverType));
577 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
579 argTys.push_back(Context.getCanonicalParamType(I->getType()));
581 I->hasAttr<NoEscapeAttr>());
582 extParamInfos.push_back(extParamInfo);
586 bool IsTargetDefaultMSABI =
592 if (
getContext().getLangOpts().ObjCAutoRefCount &&
593 MD->
hasAttr<NSReturnsRetainedAttr>())
630 assert(MD->
isVirtual() &&
"only methods have thunks");
647 ArgTys.push_back(*FTP->param_type_begin());
649 ArgTys.push_back(Context.IntTy);
650 CallingConv CC = Context.getDefaultCallingConvention(
662 unsigned numExtraRequiredArgs,
bool chainCall) {
663 assert(args.size() >= numExtraRequiredArgs);
673 if (proto->isVariadic())
676 if (proto->hasExtParameterInfos())
690 for (
const auto &arg : args)
695 paramInfos, required);
705 chainCall ? 1 : 0, chainCall);
734 for (
const auto &Arg : args)
735 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
775 assert(numPrefixArgs + 1 <= args.size() &&
776 "Emitting a call with less args than the required prefix?");
787 paramInfos, required);
798 assert(signature.
arg_size() <= args.size());
799 if (signature.
arg_size() == args.size())
804 if (!sigParamInfos.empty()) {
805 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
806 paramInfos.resize(args.size());
838 assert(llvm::all_of(argTypes,
839 [](
CanQualType T) {
return T.isCanonicalAsParam(); }));
842 llvm::FoldingSetNodeID ID;
847 bool isDelegateCall =
850 info, paramInfos, required, resultType, argTypes);
852 void *insertPos =
nullptr;
853 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
861 info, paramInfos, resultType, argTypes, required);
862 FunctionInfos.InsertNode(FI, insertPos);
864 bool inserted = FunctionsBeingProcessed.insert(FI).second;
866 assert(inserted &&
"Recursively being processed?");
870 (CC == llvm::CallingConv::SPIR_KERNEL || CC == llvm::CallingConv::C)) {
880 CGM.getABIInfo().computeInfo(*FI);
891 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() ==
nullptr)
894 bool erased = FunctionsBeingProcessed.erase(FI);
896 assert(erased &&
"Not in set?");
902 bool chainCall,
bool delegateCall,
908 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
912 void *buffer =
operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
913 argTypes.size() + 1, paramInfos.size()));
915 CGFunctionInfo *FI =
new (buffer) CGFunctionInfo();
916 FI->CallingConvention = llvmCC;
917 FI->EffectiveCallingConvention = llvmCC;
918 FI->ASTCallingConvention = info.
getCC();
919 FI->InstanceMethod = instanceMethod;
920 FI->ChainCall = chainCall;
921 FI->DelegateCall = delegateCall;
927 FI->Required = required;
930 FI->ArgStruct =
nullptr;
931 FI->ArgStructAlign = 0;
932 FI->NumArgs = argTypes.size();
933 FI->HasExtParameterInfos = !paramInfos.empty();
934 FI->getArgsBuffer()[0].
type = resultType;
935 FI->MaxVectorWidth = 0;
936 for (
unsigned i = 0, e = argTypes.size(); i != e; ++i)
937 FI->getArgsBuffer()[i + 1].
type = argTypes[i];
938 for (
unsigned i = 0, e = paramInfos.size(); i != e; ++i)
939 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
949struct TypeExpansion {
950 enum TypeExpansionKind {
962 const TypeExpansionKind Kind;
964 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
965 virtual ~TypeExpansion() {}
968struct ConstantArrayExpansion : TypeExpansion {
972 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
973 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
974 static bool classof(
const TypeExpansion *TE) {
975 return TE->Kind == TEK_ConstantArray;
979struct RecordExpansion : TypeExpansion {
980 SmallVector<const CXXBaseSpecifier *, 1> Bases;
982 SmallVector<const FieldDecl *, 1> Fields;
984 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
985 SmallVector<const FieldDecl *, 1> &&Fields)
986 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
987 Fields(std::move(Fields)) {}
988 static bool classof(
const TypeExpansion *TE) {
989 return TE->Kind == TEK_Record;
993struct ComplexExpansion : TypeExpansion {
996 ComplexExpansion(QualType EltTy) : TypeExpansion(
TEK_Complex), EltTy(EltTy) {}
997 static bool classof(
const TypeExpansion *TE) {
1002struct NoExpansion : TypeExpansion {
1003 NoExpansion() : TypeExpansion(TEK_None) {}
1004 static bool classof(
const TypeExpansion *TE) {
return TE->Kind == TEK_None; }
1008static std::unique_ptr<TypeExpansion>
1011 return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
1017 assert(!RD->hasFlexibleArrayMember() &&
1018 "Cannot expand structure with flexible array.");
1019 if (RD->isUnion()) {
1025 for (
const auto *FD : RD->fields()) {
1026 if (FD->isZeroLengthBitField())
1028 assert(!FD->isBitField() &&
1029 "Cannot expand structure with bit-field members.");
1030 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
1031 if (UnionSize < FieldSize) {
1032 UnionSize = FieldSize;
1037 Fields.push_back(LargestFD);
1039 if (
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1040 assert(!CXXRD->isDynamicClass() &&
1041 "cannot expand vtable pointers in dynamic classes");
1042 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
1045 for (
const auto *FD : RD->fields()) {
1046 if (FD->isZeroLengthBitField())
1048 assert(!FD->isBitField() &&
1049 "Cannot expand structure with bit-field members.");
1050 Fields.push_back(FD);
1053 return std::make_unique<RecordExpansion>(std::move(Bases),
1057 return std::make_unique<ComplexExpansion>(CT->getElementType());
1059 return std::make_unique<NoExpansion>();
1064 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1067 if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1069 for (
auto BS : RExp->Bases)
1071 for (
auto FD : RExp->Fields)
1084 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1085 for (
int i = 0, n = CAExp->NumElts; i < n; i++) {
1088 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1089 for (
auto BS : RExp->Bases)
1091 for (
auto FD : RExp->Fields)
1093 }
else if (
auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1104 ConstantArrayExpansion *CAE,
1106 llvm::function_ref<
void(
Address)> Fn) {
1107 for (
int i = 0, n = CAE->NumElts; i < n; i++) {
1113void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1114 llvm::Function::arg_iterator &AI) {
1115 assert(LV.isSimple() &&
1116 "Unexpected non-simple lvalue during struct expansion.");
1119 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1121 *
this, CAExp, LV.getAddress(), [&](Address EltAddr) {
1122 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1123 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1125 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1127 for (
const CXXBaseSpecifier *BS : RExp->Bases) {
1131 false, SourceLocation());
1132 LValue SubLV = MakeAddrLValue(Base, BS->
getType());
1135 ExpandTypeFromArgs(BS->
getType(), SubLV, AI);
1137 for (
auto FD : RExp->Fields) {
1139 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1140 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1143 auto realValue = &*AI++;
1144 auto imagValue = &*AI++;
1145 EmitStoreOfComplex(
ComplexPairTy(realValue, imagValue), LV,
true);
1150 llvm::Value *Arg = &*AI++;
1151 if (LV.isBitField()) {
1157 if (Arg->getType()->isPointerTy()) {
1159 Arg = Builder.CreateBitCast(Arg,
Addr.getElementType());
1161 EmitStoreOfScalar(Arg, LV);
1166void CodeGenFunction::ExpandTypeToArgs(
1167 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1168 SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos) {
1170 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1175 CallArg(convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1177 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1180 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1183 for (
const CXXBaseSpecifier *BS : RExp->Bases) {
1187 false, SourceLocation());
1191 ExpandTypeToArgs(BS->
getType(), BaseArg, IRFuncTy, IRCallArgs,
1195 LValue LV = MakeAddrLValue(This, Ty);
1196 for (
auto FD : RExp->Fields) {
1198 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1199 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1204 IRCallArgs[IRCallArgPos++] = CV.first;
1205 IRCallArgs[IRCallArgPos++] = CV.second;
1209 assert(RV.isScalar() &&
1210 "Unexpected non-scalar rvalue during struct expansion.");
1213 llvm::Value *
V = RV.getScalarVal();
1214 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1215 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1216 V = Builder.CreateBitCast(
V, IRFuncTy->getParamType(IRCallArgPos));
1218 IRCallArgs[IRCallArgPos++] =
V;
1226 const Twine &Name =
"tmp") {
1239 llvm::StructType *SrcSTy,
1243 if (SrcSTy->getNumElements() == 0)
1252 uint64_t FirstEltSize = CGF.
CGM.
getDataLayout().getTypeStoreSize(FirstElt);
1253 if (FirstEltSize < DstSize &&
1262 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1277 if (Val->getType() == Ty)
1283 return CGF.
Builder.CreateBitCast(Val, Ty,
"coerce.val");
1289 llvm::Type *DestIntTy = Ty;
1293 if (Val->getType() != DestIntTy) {
1295 if (DL.isBigEndian()) {
1298 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1299 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1301 if (SrcSize > DstSize) {
1302 Val = CGF.
Builder.CreateLShr(Val, SrcSize - DstSize,
"coerce.highbits");
1303 Val = CGF.
Builder.CreateTrunc(Val, DestIntTy,
"coerce.val.ii");
1305 Val = CGF.
Builder.CreateZExt(Val, DestIntTy,
"coerce.val.ii");
1306 Val = CGF.
Builder.CreateShl(Val, DstSize - SrcSize,
"coerce.highbits");
1310 Val = CGF.
Builder.CreateIntCast(Val, DestIntTy,
false,
"coerce.val.ii");
1315 Val = CGF.
Builder.CreateIntToPtr(Val, Ty,
"coerce.val.ip");
1322 if (PFPFields.empty())
1325 auto LoadCoercedField = [&](
CharUnits Offset,
1326 llvm::Type *FieldType) -> llvm::Value * {
1331 if (!PFPFields.empty() && PFPFields[0].Offset == Offset) {
1335 FieldVal = CGF.
Builder.CreatePtrToInt(FieldVal, FieldType);
1336 PFPFields.erase(PFPFields.begin());
1353 Val = CGF.
Builder.CreatePtrToInt(Val, Ty);
1357 auto *ET = AT->getElementType();
1361 llvm::Value *Val = llvm::PoisonValue::get(AT);
1362 for (
unsigned Idx = 0; Idx != AT->getNumElements(); ++Idx, Offset += WordSize)
1363 Val = CGF.
Builder.CreateInsertValue(Val, LoadCoercedField(Offset, ET), Idx);
1387 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1389 DstSize.getFixedValue(), CGF);
1404 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1405 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1419 if (
auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1420 if (
auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1423 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
1424 FixedSrcTy->getElementType()->isIntegerTy(8)) {
1425 ScalableDstTy = llvm::ScalableVectorType::get(
1426 FixedSrcTy->getElementType(),
1428 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
1430 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
1432 auto *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
1433 llvm::Value *Result = CGF.
Builder.CreateInsertVector(
1434 ScalableDstTy, PoisonVec, Load, uint64_t(0),
"cast.scalable");
1436 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, Ty));
1437 if (Result->getType() != ScalableDstTy)
1438 Result = CGF.
Builder.CreateBitCast(Result, ScalableDstTy);
1439 if (Result->getType() != Ty)
1440 Result = CGF.
Builder.CreateExtractVector(Ty, Result, uint64_t(0));
1452 llvm::ConstantInt::get(CGF.
IntPtrTy, SrcSize.getKnownMinValue()));
1459 if (PFPFields.empty())
1462 llvm::Type *SrcTy = Src->getType();
1463 auto StoreCoercedField = [&](
CharUnits Offset, llvm::Value *FieldVal) {
1464 if (!PFPFields.empty() && PFPFields[0].Offset == Offset) {
1469 PFPFields.erase(PFPFields.begin());
1489 auto *ET = AT->getElementType();
1493 for (
unsigned i = 0; i != AT->getNumElements(); ++i, Offset += WordSize)
1494 StoreCoercedField(Offset, CGF.
Builder.CreateExtractValue(Src, i));
1500 Address Dst, llvm::TypeSize DstSize,
1501 bool DstIsVolatile) {
1505 llvm::Type *SrcTy = Src->getType();
1506 llvm::TypeSize SrcSize =
CGM.getDataLayout().getTypeAllocSize(SrcTy);
1512 if (llvm::StructType *DstSTy =
1514 assert(!SrcSize.isScalable());
1516 SrcSize.getFixedValue(), *
this);
1523 if (SrcSize.isScalable() || SrcSize <= DstSize) {
1524 if (SrcTy->isIntegerTy() && Dst.
getElementType()->isPointerTy() &&
1528 auto *I =
Builder.CreateStore(Src, Dst, DstIsVolatile);
1530 }
else if (llvm::StructType *STy =
1531 dyn_cast<llvm::StructType>(Src->getType())) {
1534 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1536 llvm::Value *Elt =
Builder.CreateExtractValue(Src, i);
1537 auto *I =
Builder.CreateStore(Elt, EltPtr, DstIsVolatile);
1545 }
else if (SrcTy->isIntegerTy()) {
1547 llvm::Type *DstIntTy =
Builder.getIntNTy(DstSize.getFixedValue() * 8);
1564 Builder.CreateStore(Src, Tmp);
1565 auto *I =
Builder.CreateMemCpy(
1584static std::pair<llvm::Value *, bool>
1586 llvm::ScalableVectorType *FromTy, llvm::Value *
V,
1587 StringRef Name =
"") {
1590 if (FromTy->getElementType()->isIntegerTy(1) &&
1591 ToTy->getElementType() == CGF.
Builder.getInt8Ty()) {
1592 if (!FromTy->getElementCount().isKnownMultipleOf(8)) {
1593 FromTy = llvm::ScalableVectorType::get(
1594 FromTy->getElementType(),
1595 llvm::alignTo<8>(FromTy->getElementCount().getKnownMinValue()));
1596 llvm::Value *ZeroVec = llvm::Constant::getNullValue(FromTy);
1597 V = CGF.
Builder.CreateInsertVector(FromTy, ZeroVec,
V, uint64_t(0));
1599 FromTy = llvm::ScalableVectorType::get(
1600 ToTy->getElementType(),
1601 FromTy->getElementCount().getKnownMinValue() / 8);
1602 V = CGF.
Builder.CreateBitCast(
V, FromTy);
1604 if (FromTy->getElementType() == ToTy->getElementType()) {
1605 V->setName(Name +
".coerce");
1606 V = CGF.
Builder.CreateExtractVector(ToTy,
V, uint64_t(0),
"cast.fixed");
1616class ClangToLLVMArgMapping {
1617 static const unsigned InvalidIndex = ~0U;
1618 unsigned InallocaArgNo;
1620 unsigned TotalIRArgs;
1624 unsigned PaddingArgIndex;
1627 unsigned FirstArgIndex;
1628 unsigned NumberOfArgs;
1631 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1635 SmallVector<IRArgs, 8> ArgInfo;
1638 ClangToLLVMArgMapping(
const ASTContext &Context,
const CGFunctionInfo &FI,
1639 bool OnlyRequiredArgs =
false)
1640 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1641 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1642 construct(Context, FI, OnlyRequiredArgs);
1645 bool hasInallocaArg()
const {
return InallocaArgNo != InvalidIndex; }
1646 unsigned getInallocaArgNo()
const {
1647 assert(hasInallocaArg());
1648 return InallocaArgNo;
1651 bool hasSRetArg()
const {
return SRetArgNo != InvalidIndex; }
1652 unsigned getSRetArgNo()
const {
1653 assert(hasSRetArg());
1657 unsigned totalIRArgs()
const {
return TotalIRArgs; }
1659 bool hasPaddingArg(
unsigned ArgNo)
const {
1660 assert(ArgNo < ArgInfo.size());
1661 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1663 unsigned getPaddingArgNo(
unsigned ArgNo)
const {
1664 assert(hasPaddingArg(ArgNo));
1665 return ArgInfo[ArgNo].PaddingArgIndex;
1670 std::pair<unsigned, unsigned> getIRArgs(
unsigned ArgNo)
const {
1671 assert(ArgNo < ArgInfo.size());
1672 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1673 ArgInfo[ArgNo].NumberOfArgs);
1677 void construct(
const ASTContext &Context,
const CGFunctionInfo &FI,
1678 bool OnlyRequiredArgs);
1681void ClangToLLVMArgMapping::construct(
const ASTContext &Context,
1682 const CGFunctionInfo &FI,
1683 bool OnlyRequiredArgs) {
1684 unsigned IRArgNo = 0;
1685 bool SwapThisWithSRet =
false;
1690 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1698 QualType ArgType = I->type;
1699 const ABIArgInfo &AI = I->info;
1701 auto &IRArgs = ArgInfo[ArgNo];
1704 IRArgs.PaddingArgIndex = IRArgNo++;
1711 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.
getCoerceToType());
1713 IRArgs.NumberOfArgs = STy->getNumElements();
1715 IRArgs.NumberOfArgs = 1;
1721 IRArgs.NumberOfArgs = 1;
1726 IRArgs.NumberOfArgs = 0;
1736 if (IRArgs.NumberOfArgs > 0) {
1737 IRArgs.FirstArgIndex = IRArgNo;
1738 IRArgNo += IRArgs.NumberOfArgs;
1743 if (IRArgNo == 1 && SwapThisWithSRet)
1746 assert(ArgNo == ArgInfo.size());
1749 InallocaArgNo = IRArgNo++;
1751 TotalIRArgs = IRArgNo;
1759 return RI.
isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1774 switch (BT->getKind()) {
1777 case BuiltinType::Float:
1779 case BuiltinType::Double:
1781 case BuiltinType::LongDouble:
1792 if (BT->getKind() == BuiltinType::LongDouble)
1793 return getTarget().useObjCFP2RetForComplexLongDouble();
1807 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1809 assert(Inserted &&
"Recursively being processed?");
1811 llvm::Type *resultType =
nullptr;
1816 llvm_unreachable(
"Invalid ABI kind for return argument");
1828 unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
1829 resultType = llvm::PointerType::get(
getLLVMContext(), addressSpace);
1845 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI,
true);
1849 if (IRFunctionArgs.hasSRetArg()) {
1850 ArgTypes[IRFunctionArgs.getSRetArgNo()] = llvm::PointerType::get(
1855 if (IRFunctionArgs.hasInallocaArg())
1856 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1863 for (; it != ie; ++it, ++ArgNo) {
1867 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1868 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1871 unsigned FirstIRArg, NumIRArgs;
1872 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1877 assert(NumIRArgs == 0);
1881 assert(NumIRArgs == 1);
1883 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1887 assert(NumIRArgs == 1);
1888 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1897 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1899 assert(NumIRArgs == st->getNumElements());
1900 for (
unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1901 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1903 assert(NumIRArgs == 1);
1904 ArgTypes[FirstIRArg] = argType;
1910 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1912 *ArgTypesIter++ = EltTy;
1914 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1919 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1921 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1926 bool Erased = FunctionsBeingProcessed.erase(&FI);
1928 assert(Erased &&
"Not in set?");
1930 return llvm::FunctionType::get(resultType, ArgTypes, FI.
isVariadic());
1944 llvm::AttrBuilder &FuncAttrs,
1951 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1955 FuncAttrs.addAttribute(
"aarch64_pstate_sm_enabled");
1957 FuncAttrs.addAttribute(
"aarch64_pstate_sm_compatible");
1959 FuncAttrs.addAttribute(
"aarch64_za_state_agnostic");
1963 FuncAttrs.addAttribute(
"aarch64_preserves_za");
1965 FuncAttrs.addAttribute(
"aarch64_in_za");
1967 FuncAttrs.addAttribute(
"aarch64_out_za");
1969 FuncAttrs.addAttribute(
"aarch64_inout_za");
1973 FuncAttrs.addAttribute(
"aarch64_preserves_zt0");
1975 FuncAttrs.addAttribute(
"aarch64_in_zt0");
1977 FuncAttrs.addAttribute(
"aarch64_out_zt0");
1979 FuncAttrs.addAttribute(
"aarch64_inout_zt0");
1983 const Decl *Callee) {
1989 for (
const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
1990 AA->getAssumption().split(Attrs,
",");
1993 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1994 llvm::join(Attrs.begin(), Attrs.end(),
","));
2001 if (
const RecordType *RT =
2003 if (
const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
2004 return ClassDecl->hasTrivialDestructor();
2010 const Decl *TargetDecl) {
2016 if (
Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
2020 if (!
Module.getLangOpts().CPlusPlus)
2023 if (
const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
2024 if (FDecl->isExternC())
2026 }
else if (
const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
2028 if (VDecl->isExternC())
2036 return Module.getCodeGenOpts().StrictReturn ||
2037 !
Module.MayDropFunctionReturn(
Module.getContext(), RetTy) ||
2038 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
2045 llvm::DenormalMode FP32DenormalMode,
2046 llvm::AttrBuilder &FuncAttrs) {
2047 llvm::DenormalFPEnv FPEnv(FPDenormalMode, FP32DenormalMode);
2048 if (FPEnv != llvm::DenormalFPEnv::getDefault())
2049 FuncAttrs.addDenormalFPEnvAttr(FPEnv);
2057 llvm::AttrBuilder &FuncAttrs) {
2063 StringRef Name,
bool HasOptnone,
const CodeGenOptions &CodeGenOpts,
2065 llvm::AttrBuilder &FuncAttrs) {
2068 if (CodeGenOpts.OptimizeSize)
2069 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
2070 if (CodeGenOpts.OptimizeSize == 2)
2071 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
2074 if (CodeGenOpts.DisableRedZone)
2075 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
2076 if (CodeGenOpts.IndirectTlsSegRefs)
2077 FuncAttrs.addAttribute(
"indirect-tls-seg-refs");
2078 if (CodeGenOpts.NoImplicitFloat)
2079 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
2081 if (AttrOnCallSite) {
2086 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
2088 FuncAttrs.addAttribute(
"trap-func-name", CodeGenOpts.
TrapFuncName);
2090 switch (CodeGenOpts.getFramePointer()) {
2098 FuncAttrs.addAttribute(
"frame-pointer",
2100 CodeGenOpts.getFramePointer()));
2103 if (CodeGenOpts.LessPreciseFPMAD)
2104 FuncAttrs.addAttribute(
"less-precise-fpmad",
"true");
2106 if (CodeGenOpts.NullPointerIsValid)
2107 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
2110 FuncAttrs.addAttribute(
"no-trapping-math",
"true");
2114 if (CodeGenOpts.SoftFloat)
2115 FuncAttrs.addAttribute(
"use-soft-float",
"true");
2116 FuncAttrs.addAttribute(
"stack-protector-buffer-size",
2117 llvm::utostr(CodeGenOpts.SSPBufferSize));
2118 if (LangOpts.NoSignedZero)
2119 FuncAttrs.addAttribute(
"no-signed-zeros-fp-math",
"true");
2122 const std::vector<std::string> &Recips = CodeGenOpts.
Reciprocals;
2123 if (!Recips.empty())
2124 FuncAttrs.addAttribute(
"reciprocal-estimates", llvm::join(Recips,
","));
2128 FuncAttrs.addAttribute(
"prefer-vector-width",
2131 if (CodeGenOpts.StackRealignment)
2132 FuncAttrs.addAttribute(
"stackrealign");
2133 if (CodeGenOpts.Backchain)
2134 FuncAttrs.addAttribute(
"backchain");
2135 if (CodeGenOpts.EnableSegmentedStacks)
2136 FuncAttrs.addAttribute(
"split-stack");
2138 if (CodeGenOpts.SpeculativeLoadHardening)
2139 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2142 switch (CodeGenOpts.getZeroCallUsedRegs()) {
2143 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
2144 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2146 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
2147 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr-arg");
2149 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
2150 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr");
2152 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
2153 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-arg");
2155 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
2156 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used");
2158 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
2159 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr-arg");
2161 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
2162 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr");
2164 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
2165 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-arg");
2167 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
2168 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all");
2179 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2184 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
2185 LangOpts.SYCLIsDevice) {
2186 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2189 if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
2190 FuncAttrs.addAttribute(
"save-reg-params");
2193 StringRef Var,
Value;
2195 FuncAttrs.addAttribute(Var,
Value);
2209 const llvm::Function &F,
2211 auto FFeatures = F.getFnAttribute(
"target-features");
2213 llvm::StringSet<> MergedNames;
2215 MergedFeatures.reserve(TargetOpts.
Features.size());
2217 auto AddUnmergedFeatures = [&](
auto &&FeatureRange) {
2218 for (StringRef
Feature : FeatureRange) {
2222 StringRef Name =
Feature.drop_front(1);
2223 bool Merged = !MergedNames.insert(Name).second;
2225 MergedFeatures.push_back(
Feature);
2229 if (FFeatures.isValid())
2230 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(),
','));
2231 AddUnmergedFeatures(TargetOpts.
Features);
2233 if (!MergedFeatures.empty()) {
2234 llvm::sort(MergedFeatures);
2235 FuncAttr.addAttribute(
"target-features", llvm::join(MergedFeatures,
","));
2242 bool WillInternalize) {
2244 llvm::AttrBuilder FuncAttrs(F.getContext());
2247 if (!TargetOpts.
CPU.empty())
2248 FuncAttrs.addAttribute(
"target-cpu", TargetOpts.
CPU);
2249 if (!TargetOpts.
TuneCPU.empty())
2250 FuncAttrs.addAttribute(
"tune-cpu", TargetOpts.
TuneCPU);
2253 CodeGenOpts, LangOpts,
2256 if (!WillInternalize && F.isInterposable()) {
2261 F.addFnAttrs(FuncAttrs);
2265 llvm::AttributeMask AttrsToRemove;
2269 llvm::DenormalFPEnv MergedFPEnv =
2270 OptsFPEnv.mergeCalleeMode(F.getDenormalFPEnv());
2272 if (MergedFPEnv == llvm::DenormalFPEnv::getDefault()) {
2273 AttrsToRemove.addAttribute(llvm::Attribute::DenormalFPEnv);
2276 FuncAttrs.addDenormalFPEnvAttr(MergedFPEnv);
2279 F.removeFnAttrs(AttrsToRemove);
2283 F.addFnAttrs(FuncAttrs);
2286void CodeGenModule::getTrivialDefaultFunctionAttributes(
2287 StringRef Name,
bool HasOptnone,
bool AttrOnCallSite,
2288 llvm::AttrBuilder &FuncAttrs) {
2290 getLangOpts(), AttrOnCallSite,
2294void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2296 bool AttrOnCallSite,
2297 llvm::AttrBuilder &FuncAttrs) {
2301 if (!AttrOnCallSite)
2307 if (!AttrOnCallSite)
2312 llvm::AttrBuilder &attrs) {
2313 getDefaultFunctionAttributes(
"",
false,
2315 GetCPUAndFeaturesAttributes(
GlobalDecl(), attrs);
2320 const NoBuiltinAttr *NBA =
nullptr) {
2321 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2323 AttributeName +=
"no-builtin-";
2324 AttributeName += BuiltinName;
2325 FuncAttrs.addAttribute(AttributeName);
2329 if (LangOpts.NoBuiltin) {
2331 FuncAttrs.addAttribute(
"no-builtins");
2345 if (llvm::is_contained(NBA->builtinNames(),
"*")) {
2346 FuncAttrs.addAttribute(
"no-builtins");
2351 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2355 const llvm::DataLayout &DL,
const ABIArgInfo &AI,
2356 bool CheckCoerce =
true) {
2363 if (!DL.typeSizeEqualsStoreSize(Ty))
2370 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2371 DL.getTypeSizeInBits(Ty)))
2395 if (
const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2406 unsigned NumRequiredArgs,
unsigned ArgNo) {
2407 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2412 if (ArgNo >= NumRequiredArgs)
2416 if (ArgNo < FD->getNumParams()) {
2417 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2418 if (Param && Param->hasAttr<MaybeUndefAttr>())
2435 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2438 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2440 llvm::all_of(ST->elements(),
2441 llvm::AttributeFuncs::isNoFPClassCompatibleType);
2449 llvm::FPClassTest Mask = llvm::fcNone;
2450 if (LangOpts.NoHonorInfs)
2451 Mask |= llvm::fcInf;
2452 if (LangOpts.NoHonorNaNs)
2453 Mask |= llvm::fcNan;
2459 llvm::AttributeList &Attrs) {
2460 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2461 Attrs = Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Memory);
2462 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
2488 llvm::AttributeList &AttrList,
2490 bool AttrOnCallSite,
bool IsThunk) {
2498 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2500 FuncAttrs.addAttribute(
"cmse_nonsecure_call");
2511 bool HasOptnone =
false;
2513 const NoBuiltinAttr *NBA =
nullptr;
2517 std::optional<llvm::Attribute::AttrKind> MemAttrForPtrArgs;
2518 bool AddedPotentialArgAccess =
false;
2519 auto AddPotentialArgAccess = [&]() {
2520 AddedPotentialArgAccess =
true;
2521 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2523 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2524 llvm::MemoryEffects::argMemOnly());
2531 if (TargetDecl->
hasAttr<ReturnsTwiceAttr>())
2532 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2533 if (TargetDecl->
hasAttr<NoThrowAttr>())
2534 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2535 if (TargetDecl->
hasAttr<NoReturnAttr>())
2536 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2537 if (TargetDecl->
hasAttr<ColdAttr>())
2538 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2539 if (TargetDecl->
hasAttr<HotAttr>())
2540 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2541 if (TargetDecl->
hasAttr<NoDuplicateAttr>())
2542 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2543 if (TargetDecl->
hasAttr<ConvergentAttr>())
2544 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2546 if (
const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2549 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2551 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2553 (Kind == OO_New || Kind == OO_Array_New))
2554 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2557 const bool IsVirtualCall = MD && MD->
isVirtual();
2560 if (!(AttrOnCallSite && IsVirtualCall)) {
2561 if (Fn->isNoReturn())
2562 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2563 NBA = Fn->getAttr<NoBuiltinAttr>();
2570 if (AttrOnCallSite && TargetDecl->
hasAttr<NoMergeAttr>())
2571 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2575 if (TargetDecl->
hasAttr<ConstAttr>()) {
2576 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2577 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2580 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2581 MemAttrForPtrArgs = llvm::Attribute::ReadNone;
2582 }
else if (TargetDecl->
hasAttr<PureAttr>()) {
2583 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2584 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2586 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2587 MemAttrForPtrArgs = llvm::Attribute::ReadOnly;
2588 }
else if (TargetDecl->
hasAttr<NoAliasAttr>()) {
2589 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2590 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2592 if (
const auto *RA = TargetDecl->
getAttr<RestrictAttr>();
2593 RA && RA->getDeallocator() ==
nullptr)
2594 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2595 if (TargetDecl->
hasAttr<ReturnsNonNullAttr>() &&
2596 !CodeGenOpts.NullPointerIsValid)
2597 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2598 if (TargetDecl->
hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2599 FuncAttrs.addAttribute(
"no_caller_saved_registers");
2600 if (TargetDecl->
hasAttr<AnyX86NoCfCheckAttr>())
2601 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2602 if (TargetDecl->
hasAttr<LeafAttr>())
2603 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2604 if (TargetDecl->
hasAttr<BPFFastCallAttr>())
2605 FuncAttrs.addAttribute(
"bpf_fastcall");
2607 HasOptnone = TargetDecl->
hasAttr<OptimizeNoneAttr>();
2608 if (
auto *AllocSize = TargetDecl->
getAttr<AllocSizeAttr>()) {
2609 std::optional<unsigned> NumElemsParam;
2610 if (AllocSize->getNumElemsParam().isValid())
2611 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2612 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2622 FuncAttrs.addAttribute(
"uniform-work-group-size");
2624 if (TargetDecl->
hasAttr<ArmLocallyStreamingAttr>())
2625 FuncAttrs.addAttribute(
"aarch64_pstate_sm_body");
2627 if (
auto *ModularFormat = TargetDecl->
getAttr<ModularFormatAttr>()) {
2628 FormatAttr *Format = TargetDecl->
getAttr<FormatAttr>();
2629 StringRef
Type = Format->getType()->getName();
2630 std::string FormatIdx = std::to_string(Format->getFormatIdx());
2631 std::string FirstArg = std::to_string(Format->getFirstArg());
2633 Type, FormatIdx, FirstArg,
2634 ModularFormat->getModularImplFn()->getName(),
2635 ModularFormat->getImplName()};
2636 llvm::append_range(Args, ModularFormat->aspects());
2637 FuncAttrs.addAttribute(
"modular-format", llvm::join(Args,
","));
2650 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2655 if (TargetDecl->
hasAttr<NoSpeculativeLoadHardeningAttr>())
2656 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2657 if (TargetDecl->
hasAttr<SpeculativeLoadHardeningAttr>())
2658 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2659 if (TargetDecl->
hasAttr<NoSplitStackAttr>())
2660 FuncAttrs.removeAttribute(
"split-stack");
2661 if (TargetDecl->
hasAttr<ZeroCallUsedRegsAttr>()) {
2664 TargetDecl->
getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2665 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2666 FuncAttrs.addAttribute(
2667 "zero-call-used-regs",
2668 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2675 if (CodeGenOpts.NoPLT) {
2676 if (
auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2677 if (!Fn->isDefined() && !AttrOnCallSite) {
2678 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2683 if (TargetDecl->
hasAttr<NoConvergentAttr>())
2684 FuncAttrs.removeAttribute(llvm::Attribute::Convergent);
2689 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2690 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2691 if (!FD->isExternallyVisible())
2692 FuncAttrs.addAttribute(
"sample-profile-suffix-elision-policy",
2699 if (!AttrOnCallSite) {
2700 if (TargetDecl && TargetDecl->
hasAttr<CmseNSEntryAttr>())
2701 FuncAttrs.addAttribute(
"cmse_nonsecure_entry");
2704 auto shouldDisableTailCalls = [&] {
2706 if (CodeGenOpts.DisableTailCalls)
2712 if (TargetDecl->
hasAttr<DisableTailCallsAttr>() ||
2713 TargetDecl->
hasAttr<AnyX86InterruptAttr>())
2716 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2717 if (
const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2718 if (!BD->doesNotEscape())
2724 if (shouldDisableTailCalls())
2725 FuncAttrs.addAttribute(
"disable-tail-calls",
"true");
2730 static const llvm::StringSet<> ReturnsTwiceFn{
2731 "_setjmpex",
"setjmp",
"_setjmp",
"vfork",
2732 "sigsetjmp",
"__sigsetjmp",
"savectx",
"getcontext"};
2733 if (ReturnsTwiceFn.contains(Name))
2734 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2738 GetCPUAndFeaturesAttributes(CalleeInfo.
getCalleeDecl(), FuncAttrs);
2741 if (!MSHotPatchFunctions.empty()) {
2742 bool IsHotPatched = llvm::binary_search(MSHotPatchFunctions, Name);
2744 FuncAttrs.addAttribute(
"marked_for_windows_hot_patching");
2749 if (CodeGenOpts.isLoaderReplaceableFunctionName(Name))
2750 FuncAttrs.addAttribute(
"loader-replaceable");
2753 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI);
2760 if (CodeGenOpts.EnableNoundefAttrs &&
2764 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2770 RetAttrs.addAttribute(llvm::Attribute::SExt);
2772 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2774 RetAttrs.addAttribute(llvm::Attribute::NoExt);
2779 RetAttrs.addAttribute(llvm::Attribute::InReg);
2791 AddPotentialArgAccess();
2800 llvm_unreachable(
"Invalid ABI kind for return argument");
2808 RetAttrs.addDereferenceableAttr(
2810 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2811 !CodeGenOpts.NullPointerIsValid)
2812 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2814 llvm::Align Alignment =
2816 RetAttrs.addAlignmentAttr(Alignment);
2821 bool hasUsedSRet =
false;
2825 if (IRFunctionArgs.hasSRetArg()) {
2827 SRETAttrs.addStructRetAttr(
getTypes().ConvertTypeForMem(RetTy));
2828 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2829 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2832 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2834 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2839 if (IRFunctionArgs.hasInallocaArg()) {
2842 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2852 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2854 assert(IRArgs.second == 1 &&
"Expected only a single `this` pointer.");
2860 if (!CodeGenOpts.NullPointerIsValid &&
2862 Attrs.addAttribute(llvm::Attribute::NonNull);
2869 Attrs.addDereferenceableOrNullAttr(
2875 llvm::Align Alignment =
2879 Attrs.addAlignmentAttr(Alignment);
2881 const auto *DD = dyn_cast_if_present<CXXDestructorDecl>(
2895 CodeGenOpts.StrictLifetimes) {
2897 dyn_cast<CXXRecordDecl>(DD->getDeclContext());
2901 Attrs.addDeadOnReturnAttr(llvm::DeadOnReturnInfo(
2902 Context.getASTRecordLayout(ClassDecl).getDataSize().getQuantity()));
2905 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(
getLLVMContext(), Attrs);
2910 I != E; ++I, ++ArgNo) {
2916 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2918 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2921 .addAttribute(llvm::Attribute::InReg));
2926 if (CodeGenOpts.EnableNoundefAttrs &&
2928 Attrs.addAttribute(llvm::Attribute::NoUndef);
2937 Attrs.addAttribute(llvm::Attribute::SExt);
2939 Attrs.addAttribute(llvm::Attribute::ZExt);
2941 Attrs.addAttribute(llvm::Attribute::NoExt);
2946 Attrs.addAttribute(llvm::Attribute::Nest);
2948 Attrs.addAttribute(llvm::Attribute::InReg);
2949 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.
getDirectAlign()));
2956 Attrs.addAttribute(llvm::Attribute::InReg);
2968 Attrs.addByValAttr(
getTypes().ConvertTypeForMem(ParamType));
2976 Attrs.addDeadOnReturnAttr(llvm::DeadOnReturnInfo());
2981 if (CodeGenOpts.PassByValueIsNoAlias &&
Decl &&
2982 Decl->getArgPassingRestrictions() ==
2986 Attrs.addAttribute(llvm::Attribute::NoAlias);
3011 AddPotentialArgAccess();
3016 Attrs.addByRefAttr(
getTypes().ConvertTypeForMem(ParamType));
3027 AddPotentialArgAccess();
3035 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
3036 !CodeGenOpts.NullPointerIsValid)
3037 Attrs.addAttribute(llvm::Attribute::NonNull);
3039 llvm::Align Alignment =
3041 Attrs.addAlignmentAttr(Alignment);
3050 DeviceKernelAttr::isOpenCLSpelling(
3051 TargetDecl->
getAttr<DeviceKernelAttr>()) &&
3055 llvm::Align Alignment =
3057 Attrs.addAlignmentAttr(Alignment);
3064 Attrs.addAttribute(llvm::Attribute::NoAlias);
3073 Attrs.addStructRetAttr(
getTypes().ConvertTypeForMem(ParamType));
3078 Attrs.addAttribute(llvm::Attribute::NoAlias);
3082 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
3083 auto info =
getContext().getTypeInfoInChars(PTy);
3084 Attrs.addDereferenceableAttr(info.Width.getQuantity());
3085 Attrs.addAlignmentAttr(info.Align.getAsAlign());
3091 Attrs.addAttribute(llvm::Attribute::SwiftError);
3095 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
3099 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
3104 Attrs.addCapturesAttr(llvm::CaptureInfo::none());
3106 if (Attrs.hasAttributes()) {
3107 unsigned FirstIRArg, NumIRArgs;
3108 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3109 for (
unsigned i = 0; i < NumIRArgs; i++)
3110 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
3119 AddPotentialArgAccess();
3122 if (AddedPotentialArgAccess && MemAttrForPtrArgs) {
3126 I != E; ++I, ++ArgNo) {
3127 if (I->info.isDirect() || I->info.isExpand() ||
3128 I->info.isCoerceAndExpand()) {
3129 unsigned FirstIRArg, NumIRArgs;
3130 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3131 for (
unsigned i = FirstIRArg; i < FirstIRArg + NumIRArgs; ++i) {
3138 if (i < FunctionType->getNumParams() &&
3148 AttrList = llvm::AttributeList::get(
3157 llvm::Value *value) {
3158 llvm::Type *varType = CGF.
ConvertType(var->getType());
3162 if (value->getType() == varType)
3165 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) &&
3166 "unexpected promotion type");
3169 return CGF.
Builder.CreateTrunc(value, varType,
"arg.unpromote");
3171 return CGF.
Builder.CreateFPCast(value, varType,
"arg.unpromote");
3177 QualType ArgType,
unsigned ArgNo) {
3185 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
3189 if (
auto ParmNNAttr = PVD->
getAttr<NonNullAttr>())
3196 if (NNAttr->isNonNull(ArgNo))
3203struct CopyBackSwiftError final : EHScopeStack::Cleanup {
3206 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(
arg) {}
3207 void Emit(CodeGenFunction &CGF, Flags flags)
override {
3226 if (FD->hasImplicitReturnZero()) {
3227 QualType RetTy = FD->getReturnType().getUnqualifiedType();
3228 llvm::Type *LLVMTy =
CGM.getTypes().ConvertType(RetTy);
3229 llvm::Constant *
Zero = llvm::Constant::getNullValue(LLVMTy);
3237 ClangToLLVMArgMapping IRFunctionArgs(
CGM.getContext(), FI);
3238 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
3243 if (IRFunctionArgs.hasInallocaArg())
3244 ArgStruct =
Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
3248 if (IRFunctionArgs.hasSRetArg()) {
3249 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
3250 AI->setName(
"agg.result");
3251 AI->addAttr(llvm::Attribute::NoAlias);
3258 ArgVals.reserve(Args.size());
3264 assert(FI.
arg_size() == Args.size() &&
3265 "Mismatch between function signature & arguments.");
3268 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e;
3269 ++i, ++info_it, ++ArgNo) {
3282 unsigned FirstIRArg, NumIRArgs;
3283 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3287 assert(NumIRArgs == 0);
3300 assert(NumIRArgs == 1);
3323 llvm::ConstantInt::get(
IntPtrTy, Size.getQuantity()));
3324 ParamAddr = AlignedTemp;
3341 auto AI = Fn->getArg(FirstIRArg);
3349 assert(NumIRArgs == 1);
3351 if (
const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
3354 PVD->getFunctionScopeIndex()) &&
3355 !
CGM.getCodeGenOpts().NullPointerIsValid)
3356 AI->addAttr(llvm::Attribute::NonNull);
3358 QualType OTy = PVD->getOriginalType();
3359 if (
const auto *ArrTy =
getContext().getAsConstantArrayType(OTy)) {
3365 QualType ETy = ArrTy->getElementType();
3366 llvm::Align Alignment =
3367 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3369 .addAlignmentAttr(Alignment));
3370 uint64_t ArrSize = ArrTy->getZExtSize();
3374 Attrs.addDereferenceableAttr(
3375 getContext().getTypeSizeInChars(ETy).getQuantity() *
3377 AI->addAttrs(Attrs);
3378 }
else if (
getContext().getTargetInfo().getNullPointerValue(
3380 !
CGM.getCodeGenOpts().NullPointerIsValid) {
3381 AI->addAttr(llvm::Attribute::NonNull);
3384 }
else if (
const auto *ArrTy =
3390 QualType ETy = ArrTy->getElementType();
3391 llvm::Align Alignment =
3392 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3394 .addAlignmentAttr(Alignment));
3395 if (!
getTypes().getTargetAddressSpace(ETy) &&
3396 !
CGM.getCodeGenOpts().NullPointerIsValid)
3397 AI->addAttr(llvm::Attribute::NonNull);
3402 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3405 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3406 if (AVAttr && !
SanOpts.has(SanitizerKind::Alignment)) {
3410 llvm::ConstantInt *AlignmentCI =
3412 uint64_t AlignmentInt =
3413 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3414 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3415 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3417 .addAlignmentAttr(llvm::Align(AlignmentInt)));
3424 AI->addAttr(llvm::Attribute::NoAlias);
3432 assert(NumIRArgs == 1);
3436 llvm::Value *
V = AI;
3444 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
3445 llvm::Value *incomingErrorValue =
Builder.CreateLoad(arg);
3446 Builder.CreateStore(incomingErrorValue, temp);
3467 if (
V->getType() != LTy)
3478 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(
ConvertType(Ty))) {
3479 llvm::Value *ArgVal = Fn->getArg(FirstIRArg);
3480 if (
auto *VecTyFrom =
3481 dyn_cast<llvm::ScalableVectorType>(ArgVal->getType())) {
3483 *
this, VecTyTo, VecTyFrom, ArgVal, Arg->
getName());
3485 assert(NumIRArgs == 1);
3492 llvm::StructType *STy =
3503 STy->getNumElements() > 1) {
3504 llvm::TypeSize StructSize =
CGM.getDataLayout().getTypeAllocSize(STy);
3505 llvm::TypeSize PtrElementSize =
3507 if (StructSize.isScalable()) {
3508 assert(STy->containsHomogeneousScalableVectorTypes() &&
3509 "ABI only supports structure with homogeneous scalable vector "
3511 assert(StructSize == PtrElementSize &&
3512 "Only allow non-fractional movement of structure with"
3513 "homogeneous scalable vector type");
3514 assert(STy->getNumElements() == NumIRArgs);
3516 llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
3517 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3518 auto *AI = Fn->getArg(FirstIRArg + i);
3519 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3521 Builder.CreateInsertValue(LoadedStructValue, AI, i);
3524 Builder.CreateStore(LoadedStructValue, Ptr);
3526 uint64_t SrcSize = StructSize.getFixedValue();
3527 uint64_t DstSize = PtrElementSize.getFixedValue();
3530 if (SrcSize <= DstSize) {
3537 assert(STy->getNumElements() == NumIRArgs);
3538 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3539 auto AI = Fn->getArg(FirstIRArg + i);
3540 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3542 Builder.CreateStore(AI, EltPtr);
3545 if (SrcSize > DstSize) {
3546 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
3558 assert(NumIRArgs == 1);
3559 auto AI = Fn->getArg(FirstIRArg);
3560 AI->setName(Arg->
getName() +
".coerce");
3563 llvm::TypeSize::getFixed(
3564 getContext().getTypeSizeInChars(Ty).getQuantity() -
3589 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
3593 unsigned argIndex = FirstIRArg;
3594 unsigned unpaddedIndex = 0;
3595 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3596 llvm::Type *eltType = coercionType->getElementType(i);
3600 auto eltAddr =
Builder.CreateStructGEP(alloca, i);
3601 llvm::Value *elt = Fn->getArg(argIndex++);
3603 auto paramType = unpaddedStruct
3604 ? unpaddedStruct->getElementType(unpaddedIndex++)
3605 : unpaddedCoercionType;
3607 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(eltType)) {
3608 if (
auto *VecTyFrom = dyn_cast<llvm::ScalableVectorType>(paramType)) {
3611 *
this, VecTyTo, VecTyFrom, elt, elt->getName());
3612 assert(Extracted &&
"Unexpected scalable to fixed vector coercion");
3615 Builder.CreateStore(elt, eltAddr);
3617 assert(argIndex == FirstIRArg + NumIRArgs);
3629 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3630 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3631 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3632 for (
unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3633 auto AI = Fn->getArg(FirstIRArg + i);
3634 AI->setName(Arg->
getName() +
"." + Twine(i));
3640 auto *AI = Fn->getArg(FirstIRArg);
3641 AI->setName(Arg->
getName() +
".target_coerce");
3645 CGM.getABIInfo().createCoercedStore(AI, Ptr, ArgI,
false, *
this);
3659 assert(NumIRArgs == 0);
3671 if (
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3672 for (
int I = Args.size() - 1; I >= 0; --I)
3675 for (
unsigned I = 0, E = Args.size(); I != E; ++I)
3681 while (insn->use_empty()) {
3682 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3688 bitcast->eraseFromParent();
3694 llvm::Value *result) {
3696 llvm::BasicBlock *BB = CGF.
Builder.GetInsertBlock();
3699 if (&BB->back() != result)
3702 llvm::Type *resultType = result->getType();
3711 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3717 if (generator->getNextNode() != bitcast)
3720 InstsToKill.push_back(bitcast);
3727 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3731 bool doRetainAutorelease;
3734 doRetainAutorelease =
true;
3735 }
else if (call->getCalledOperand() ==
3737 doRetainAutorelease =
false;
3745 llvm::Instruction *prev = call->getPrevNode();
3748 prev = prev->getPrevNode();
3754 InstsToKill.push_back(prev);
3760 result = call->getArgOperand(0);
3761 InstsToKill.push_back(call);
3765 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3766 if (!bitcast->hasOneUse())
3768 InstsToKill.push_back(bitcast);
3769 result = bitcast->getOperand(0);
3773 for (
auto *I : InstsToKill)
3774 I->eraseFromParent();
3777 if (doRetainAutorelease)
3781 return CGF.
Builder.CreateBitCast(result, resultType);
3786 llvm::Value *result) {
3789 dyn_cast_or_null<ObjCMethodDecl>(CGF.
CurCodeDecl);
3798 llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
3799 if (!retainCall || retainCall->getCalledOperand() !=
3804 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3805 llvm::LoadInst *load =
3806 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3807 if (!load || load->isAtomic() || load->isVolatile() ||
3814 llvm::Type *resultType = result->getType();
3816 assert(retainCall->use_empty());
3817 retainCall->eraseFromParent();
3820 return CGF.
Builder.CreateBitCast(load, resultType);
3827 llvm::Value *result) {
3850 auto GetStoreIfValid = [&CGF,
3851 ReturnValuePtr](llvm::User *
U) -> llvm::StoreInst * {
3852 auto *SI = dyn_cast<llvm::StoreInst>(
U);
3853 if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
3859 assert(!SI->isAtomic() &&
3867 if (!ReturnValuePtr->hasOneUse()) {
3868 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3874 const llvm::Instruction *LoadIntoFakeUse =
nullptr;
3875 for (llvm::Instruction &I : llvm::reverse(*IP)) {
3879 if (LoadIntoFakeUse == &I)
3883 if (
auto *II = dyn_cast<llvm::IntrinsicInst>(&I)) {
3884 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3887 if (II->getIntrinsicID() == llvm::Intrinsic::fake_use) {
3888 LoadIntoFakeUse = dyn_cast<llvm::Instruction>(II->getArgOperand(0));
3892 return GetStoreIfValid(&I);
3897 llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
3903 llvm::BasicBlock *StoreBB = store->getParent();
3904 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3906 while (IP != StoreBB) {
3907 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3923 int BitWidth,
int CharWidth) {
3924 assert(CharWidth <= 64);
3925 assert(
static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3928 if (BitOffset >= CharWidth) {
3929 Pos += BitOffset / CharWidth;
3930 BitOffset = BitOffset % CharWidth;
3933 const uint64_t
Used = (uint64_t(1) << CharWidth) - 1;
3934 if (BitOffset + BitWidth >= CharWidth) {
3935 Bits[Pos++] |= (
Used << BitOffset) &
Used;
3936 BitWidth -= CharWidth - BitOffset;
3940 while (BitWidth >= CharWidth) {
3942 BitWidth -= CharWidth;
3946 Bits[Pos++] |= (
Used >> (CharWidth - BitWidth)) << BitOffset;
3954 int StorageSize,
int BitOffset,
int BitWidth,
3955 int CharWidth,
bool BigEndian) {
3958 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3961 std::reverse(TmpBits.begin(), TmpBits.end());
3963 for (uint64_t
V : TmpBits)
3964 Bits[StorageOffset++] |=
V;
3967static void setUsedBits(CodeGenModule &, QualType,
int,
3968 SmallVectorImpl<uint64_t> &);
3979 const RecordDecl *RD = RTy->getDecl()->getDefinition();
4010 QualType ETy = Context.getBaseElementType(ATy);
4011 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
4015 for (
int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
4016 auto Src = TmpBits.begin();
4017 auto Dst = Bits.begin() + Offset + I * Size;
4018 for (
int J = 0; J < Size; ++J)
4031 if (
const auto *ATy = Context.getAsConstantArrayType(QTy))
4034 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
4038 std::fill_n(Bits.begin() + Offset, Size,
4039 (uint64_t(1) << Context.getCharWidth()) - 1);
4043 int Pos,
int Size,
int CharWidth,
4048 for (
auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
4050 Mask = (Mask << CharWidth) | *P;
4052 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
4054 Mask = (Mask << CharWidth) | *--P;
4063 llvm::IntegerType *ITy,
4065 assert(Src->getType() == ITy);
4066 assert(ITy->getScalarSizeInBits() <= 64);
4068 const llvm::DataLayout &DataLayout =
CGM.getDataLayout();
4069 int Size = DataLayout.getTypeStoreSize(ITy);
4073 int CharWidth =
CGM.getContext().getCharWidth();
4077 return Builder.CreateAnd(Src, Mask,
"cmse.clear");
4083 llvm::ArrayType *ATy,
4085 const llvm::DataLayout &DataLayout =
CGM.getDataLayout();
4086 int Size = DataLayout.getTypeStoreSize(ATy);
4091 int CharWidth =
CGM.getContext().getCharWidth();
4093 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
4095 llvm::Value *R = llvm::PoisonValue::get(ATy);
4096 for (
int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
4098 DataLayout.isBigEndian());
4099 MaskIndex += CharsPerElt;
4100 llvm::Value *T0 =
Builder.CreateExtractValue(Src, I);
4101 llvm::Value *T1 =
Builder.CreateAnd(T0, Mask,
"cmse.clear");
4102 R =
Builder.CreateInsertValue(R, T1, I);
4110 uint64_t RetKeyInstructionsSourceAtom) {
4125 auto *I =
Builder.CreateRetVoid();
4126 if (RetKeyInstructionsSourceAtom)
4133 llvm::DebugLoc RetDbgLoc;
4134 llvm::Value *RV =
nullptr;
4144 llvm::Function::arg_iterator EI =
CurFn->arg_end();
4146 llvm::Value *ArgStruct = &*EI;
4147 llvm::Value *SRet =
Builder.CreateStructGEP(
4156 auto AI =
CurFn->arg_begin();
4174 CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
4201 RetDbgLoc = SI->getDebugLoc();
4203 RV = SI->getValueOperand();
4204 SI->eraseFromParent();
4227 if (
auto *FD = dyn_cast<FunctionDecl>(
CurCodeDecl))
4228 RT = FD->getReturnType();
4229 else if (
auto *MD = dyn_cast<ObjCMethodDecl>(
CurCodeDecl))
4230 RT = MD->getReturnType();
4232 RT =
BlockInfo->BlockExpression->getFunctionType()->getReturnType();
4234 llvm_unreachable(
"Unexpected function/method type");
4250 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
4255 unsigned unpaddedIndex = 0;
4256 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4257 auto coercedEltType = coercionType->getElementType(i);
4261 auto eltAddr =
Builder.CreateStructGEP(addr, i);
4264 unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
4265 : unpaddedCoercionType,
4267 results.push_back(elt);
4271 if (results.size() == 1) {
4279 RV = llvm::PoisonValue::get(returnType);
4280 for (
unsigned i = 0, e = results.size(); i != e; ++i) {
4281 RV =
Builder.CreateInsertValue(RV, results[i], i);
4288 RV =
CGM.getABIInfo().createCoercedLoad(
V, RetAI, *
this);
4293 llvm_unreachable(
"Invalid ABI kind for return argument");
4296 llvm::Instruction *Ret;
4302 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
4309 Ret =
Builder.CreateRetVoid();
4313 Ret->setDebugLoc(std::move(RetDbgLoc));
4315 llvm::Value *Backup = RV ? Ret->getOperand(0) :
nullptr;
4316 if (RetKeyInstructionsSourceAtom)
4332 ReturnsNonNullAttr *RetNNAttr =
nullptr;
4333 if (
SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
4334 RetNNAttr =
CurCodeDecl->getAttr<ReturnsNonNullAttr>();
4336 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
4344 assert(!requiresReturnValueNullabilityCheck() &&
4345 "Cannot check nullability and the nonnull attribute");
4346 AttrLoc = RetNNAttr->getLocation();
4347 CheckKind = SanitizerKind::SO_ReturnsNonnullAttribute;
4348 Handler = SanitizerHandler::NonnullReturn;
4350 if (
auto *DD = dyn_cast<DeclaratorDecl>(
CurCodeDecl))
4351 if (
auto *TSI = DD->getTypeSourceInfo())
4353 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
4354 CheckKind = SanitizerKind::SO_NullabilityReturn;
4355 Handler = SanitizerHandler::NullabilityReturn;
4364 llvm::Value *SLocPtr =
Builder.CreateLoad(ReturnLocation,
"return.sloc.load");
4365 llvm::Value *CanNullCheck =
Builder.CreateIsNotNull(SLocPtr);
4366 if (requiresReturnValueNullabilityCheck())
4368 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
4369 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
4375 llvm::Value *DynamicData[] = {SLocPtr};
4376 EmitCheck(std::make_pair(
Cond, CheckKind), Handler, StaticData, DynamicData);
4395 llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.
getLLVMContext());
4396 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
4421 if (
type->isReferenceType()) {
4430 param->
hasAttr<NSConsumedAttr>() &&
type->isObjCRetainableType()) {
4431 llvm::Value *ptr =
Builder.CreateLoad(local);
4434 Builder.CreateStore(null, local);
4445 type->castAsRecordDecl()->isParamDestroyedInCallee() &&
4450 "cleanup for callee-destructed param not recorded");
4452 llvm::Instruction *isActive =
Builder.CreateUnreachable();
4458 return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
4468 const LValue &srcLV = writeback.
Source;
4469 Address srcAddr = srcLV.getAddress();
4471 "shouldn't have writeback for provably null argument");
4479 llvm::BasicBlock *contBB =
nullptr;
4485 if (!provablyNonNull) {
4490 CGF.
Builder.CreateCondBr(isNull, contBB, writebackBB);
4499 "icr.writeback-cast");
4508 if (writeback.
ToUse) {
4533 if (!provablyNonNull)
4542 for (
const auto &I : llvm::reverse(Cleanups)) {
4544 I.IsActiveIP->eraseFromParent();
4550 if (uop->getOpcode() == UO_AddrOf)
4551 return uop->getSubExpr();
4576 Address srcAddr = srcLV.getAddress();
4581 llvm::PointerType *destType =
4583 llvm::Type *destElemType =
4610 llvm::BasicBlock *contBB =
nullptr;
4611 llvm::BasicBlock *originBB =
nullptr;
4614 llvm::Value *finalArgument;
4618 if (provablyNonNull) {
4623 finalArgument = CGF.
Builder.CreateSelect(
4624 isNull, llvm::ConstantPointerNull::get(destType),
4630 originBB = CGF.
Builder.GetInsertBlock();
4633 CGF.
Builder.CreateCondBr(isNull, contBB, copyBB);
4635 condEval.
begin(CGF);
4639 llvm::Value *valueToUse =
nullptr;
4647 src = CGF.
Builder.CreateBitCast(src, destElemType,
"icr.cast");
4664 if (shouldCopy && !provablyNonNull) {
4665 llvm::BasicBlock *copyBB = CGF.
Builder.GetInsertBlock();
4670 llvm::PHINode *phiToUse =
4671 CGF.
Builder.CreatePHI(valueToUse->getType(), 2,
"icr.to-use");
4672 phiToUse->addIncoming(valueToUse, copyBB);
4673 phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),
4675 valueToUse = phiToUse;
4689 StackBase = CGF.
Builder.CreateStackSave(
"inalloca.save");
4695 CGF.
Builder.CreateStackRestore(StackBase);
4702 if (!AC.
getDecl() || !(
SanOpts.has(SanitizerKind::NonnullAttribute) ||
4703 SanOpts.has(SanitizerKind::NullabilityArg)))
4708 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4711 const NonNullAttr *NNAttr =
nullptr;
4712 if (
SanOpts.has(SanitizerKind::NonnullAttribute))
4715 bool CanCheckNullability =
false;
4716 if (
SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
4717 !PVD->getType()->isRecordType()) {
4718 auto Nullability = PVD->getType()->getNullability();
4719 CanCheckNullability = Nullability &&
4721 PVD->getTypeSourceInfo();
4724 if (!NNAttr && !CanCheckNullability)
4731 AttrLoc = NNAttr->getLocation();
4732 CheckKind = SanitizerKind::SO_NonnullAttribute;
4733 Handler = SanitizerHandler::NonnullArg;
4735 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4736 CheckKind = SanitizerKind::SO_NullabilityArg;
4737 Handler = SanitizerHandler::NullabilityArg;
4742 llvm::Constant *StaticData[] = {
4745 llvm::ConstantInt::get(
Int32Ty, ArgNo + 1),
4747 EmitCheck(std::make_pair(
Cond, CheckKind), Handler, StaticData, {});
4753 if (!AC.
getDecl() || !(
SanOpts.has(SanitizerKind::NonnullAttribute) ||
4754 SanOpts.has(SanitizerKind::NullabilityArg)))
4773 return llvm::any_of(ArgTypes, [&](
QualType Ty) {
4784 return classDecl->getTypeParamListAsWritten();
4788 return catDecl->getTypeParamList();
4798 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4802 assert((ParamsToSkip == 0 ||
Prototype.P) &&
4803 "Can't skip parameters if type info is not provided");
4813 bool IsVariadic =
false;
4815 const auto *MD = dyn_cast<const ObjCMethodDecl *>(
Prototype.P);
4817 IsVariadic = MD->isVariadic();
4819 MD,
CGM.getTarget().getTriple().isOSWindows());
4820 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4821 MD->param_type_end());
4824 IsVariadic = FPT->isVariadic();
4825 ExplicitCC = FPT->getExtInfo().getCC();
4826 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4827 FPT->param_type_end());
4835 assert(Arg != ArgRange.end() &&
"Running over edge of argument list!");
4837 QualType ArgTy = (*Arg)->getType();
4838 if (
const auto *OBT = ParamTy->
getAs<OverflowBehaviorType>())
4839 ParamTy = OBT->getUnderlyingType();
4840 if (
const auto *OBT = ArgTy->
getAs<OverflowBehaviorType>())
4841 ArgTy = OBT->getUnderlyingType();
4844 getContext().getCanonicalType(ParamTy).getTypePtr() ==
4845 getContext().getCanonicalType(ArgTy).getTypePtr()) &&
4846 "type mismatch in call argument!");
4852 assert((Arg == ArgRange.end() || IsVariadic) &&
4853 "Extra arguments in non-variadic function!");
4858 for (
auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4859 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4860 assert((
int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4868 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4872 auto MaybeEmitImplicitObjectSize = [&](
unsigned I,
const Expr *Arg,
4881 auto SizeTy = Context.getSizeType();
4883 assert(EmittedArg.getScalarVal() &&
"We emitted nothing for the arg?");
4884 llvm::Value *
V = evaluateOrEmitBuiltinObjectSize(
4885 Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic());
4890 std::swap(Args.back(), *(&Args.back() - 1));
4896 "inalloca only supported on x86");
4901 size_t CallArgsStart = Args.size();
4902 for (
unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4903 unsigned Idx = LeftToRight ? I : E - I - 1;
4905 unsigned InitialArgSize = Args.size();
4909 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4913 "Argument and parameter types don't match");
4917 assert(InitialArgSize + 1 == Args.size() &&
4918 "The code below depends on only adding one arg per EmitCallArg");
4919 (void)InitialArgSize;
4922 if (!Args.back().hasLValue()) {
4923 RValue RVArg = Args.back().getKnownRValue();
4925 ParamsToSkip + Idx);
4929 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4936 std::reverse(Args.begin() + CallArgsStart, Args.end());
4945struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4978 if (!HasLV &&
RV.isScalar())
4980 else if (!HasLV &&
RV.isComplex())
4983 auto Addr = HasLV ?
LV.getAddress() :
RV.getAggregateAddress();
4987 HasLV ?
LV.isVolatileQualified()
4988 :
RV.isVolatileQualified());
5000 std::optional<DisableDebugLocationUpdates> Dis;
5004 dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
5018 "reference binding to unmaterialized r-value!");
5030 if (
type->isRecordType() &&
5031 type->castAsRecordDecl()->isParamDestroyedInCallee()) {
5038 bool DestroyedInCallee =
true, NeedsCleanup =
true;
5039 if (
const auto *RD =
type->getAsCXXRecordDecl())
5040 DestroyedInCallee = RD->hasNonTrivialDestructor();
5042 NeedsCleanup =
type.isDestructedType();
5044 if (DestroyedInCallee)
5051 if (DestroyedInCallee && NeedsCleanup) {
5058 llvm::Instruction *IsActive =
5067 !
type->isArrayParameterType() && !
type.isNonTrivialToPrimitiveCopy()) {
5077QualType CodeGenFunction::getVarArgType(
const Expr *Arg) {
5081 if (!getTarget().
getTriple().isOSWindows())
5085 getContext().getTypeSize(Arg->
getType()) <
5089 return getContext().getIntPtrType();
5097void CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
5098 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
5099 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
5100 Inst->setMetadata(
"clang.arc.no_objc_arc_exceptions",
5101 CGM.getNoObjCARCExceptionsMetadata());
5107 const llvm::Twine &name) {
5108 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value *>(), name);
5114 ArrayRef<Address> args,
5115 const llvm::Twine &name) {
5116 SmallVector<llvm::Value *, 3> values;
5117 for (
auto arg : args)
5118 values.push_back(
arg.emitRawPointer(*
this));
5119 return EmitNounwindRuntimeCall(callee, values, name);
5124 ArrayRef<llvm::Value *> args,
5125 const llvm::Twine &name) {
5126 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
5127 call->setDoesNotThrow();
5134 const llvm::Twine &name) {
5135 return EmitRuntimeCall(callee, {},
name);
5140SmallVector<llvm::OperandBundleDef, 1>
5149 if (
auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
5150 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
5151 auto IID = CalleeFn->getIntrinsicID();
5152 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
5165 const llvm::Twine &name) {
5166 llvm::CallInst *call = Builder.CreateCall(
5167 callee, args, getBundlesForFunclet(callee.getCallee()), name);
5168 call->setCallingConv(getRuntimeCC());
5170 if (CGM.shouldEmitConvergenceTokens() && call->isConvergent())
5182 llvm::InvokeInst *invoke =
Builder.CreateInvoke(
5184 invoke->setDoesNotReturn();
5187 llvm::CallInst *call =
Builder.CreateCall(callee, args, BundleList);
5188 call->setDoesNotReturn();
5197 const Twine &name) {
5205 const Twine &name) {
5215 const Twine &Name) {
5220 llvm::CallBase *Inst;
5222 Inst =
Builder.CreateCall(Callee, Args, BundleList, Name);
5225 Inst =
Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
5232 if (
CGM.getLangOpts().ObjCAutoRefCount)
5233 AddObjCARCExceptionMetadata(Inst);
5238void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
5240 DeferredReplacements.push_back(
5241 std::make_pair(llvm::WeakTrackingVH(Old),
New));
5248[[nodiscard]] llvm::AttributeList
5249maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
5250 const llvm::AttributeList &Attrs,
5251 llvm::Align NewAlign) {
5252 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
5253 if (CurAlign >= NewAlign)
5255 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
5256 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
5257 .addRetAttribute(Ctx, AlignAttr);
5260template <
typename AlignedAttrTy>
class AbstractAssumeAlignedAttrEmitter {
5265 const AlignedAttrTy *AA =
nullptr;
5267 llvm::Value *Alignment =
nullptr;
5268 llvm::ConstantInt *OffsetCI =
nullptr;
5274 AA = FuncDecl->
getAttr<AlignedAttrTy>();
5279 [[nodiscard]] llvm::AttributeList
5280 TryEmitAsCallSiteAttribute(
const llvm::AttributeList &Attrs) {
5281 if (!AA || OffsetCI || CGF.
SanOpts.
has(SanitizerKind::Alignment))
5283 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
5288 if (!AlignmentCI->getValue().isPowerOf2())
5290 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
5293 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
5301 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
5305 AA->getLocation(), Alignment, OffsetCI);
5311class AssumeAlignedAttrEmitter final
5312 :
public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
5314 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_,
const Decl *FuncDecl)
5315 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5320 if (Expr *Offset = AA->getOffset()) {
5322 if (OffsetCI->isNullValue())
5329class AllocAlignAttrEmitter final
5330 :
public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
5332 AllocAlignAttrEmitter(CodeGenFunction &CGF_,
const Decl *FuncDecl,
5333 const CallArgList &CallArgs)
5334 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5338 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
5347 if (
auto *VT = dyn_cast<llvm::VectorType>(Ty))
5348 return VT->getPrimitiveSizeInBits().getKnownMinValue();
5349 if (
auto *AT = dyn_cast<llvm::ArrayType>(Ty))
5352 unsigned MaxVectorWidth = 0;
5353 if (
auto *ST = dyn_cast<llvm::StructType>(Ty))
5354 for (
auto *I : ST->elements())
5356 return MaxVectorWidth;
5363 llvm::CallBase **callOrInvoke,
bool IsMustTail,
5365 bool IsVirtualFunctionPointerThunk) {
5368 assert(Callee.isOrdinary() || Callee.isVirtual());
5375 llvm::FunctionType *IRFuncTy =
getTypes().GetFunctionType(CallInfo);
5377 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
5378 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5385 if ((TargetDecl->
hasAttr<AlwaysInlineAttr>() &&
5386 (TargetDecl->
hasAttr<TargetAttr>() ||
5390 TargetDecl->
hasAttr<TargetAttr>())))
5397 const FunctionDecl *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl);
5398 CGM.getTargetCodeGenInfo().checkFunctionCallABI(
CGM, Loc, CallerDecl,
5399 CalleeDecl, CallArgs, RetTy);
5406 if (llvm::StructType *ArgStruct = CallInfo.
getArgStruct()) {
5407 const llvm::DataLayout &DL =
CGM.getDataLayout();
5409 llvm::AllocaInst *AI;
5411 IP = IP->getNextNode();
5412 AI =
new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
"argmem",
5418 AI->setAlignment(Align.getAsAlign());
5419 AI->setUsedWithInAlloca(
true);
5420 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5421 ArgMemory =
RawAddress(AI, ArgStruct, Align);
5424 ClangToLLVMArgMapping IRFunctionArgs(
CGM.getContext(), CallInfo);
5430 bool NeedSRetLifetimeEnd =
false;
5436 if ((IsVirtualFunctionPointerThunk || IsMustTail) && RetAI.
isIndirect()) {
5438 IRFunctionArgs.getSRetArgNo(),
5447 if (IRFunctionArgs.hasSRetArg()) {
5460 IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
5478 assert(CallInfo.
arg_size() == CallArgs.size() &&
5479 "Mismatch between function signature & arguments.");
5482 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5483 I != E; ++I, ++info_it, ++ArgNo) {
5487 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5488 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5491 unsigned FirstIRArg, NumIRArgs;
5492 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5494 bool ArgHasMaybeUndefAttr =
5499 assert(NumIRArgs == 0);
5501 if (I->isAggregate()) {
5503 ? I->getKnownLValue().getAddress()
5504 : I->getKnownRValue().getAggregateAddress();
5505 llvm::Instruction *Placeholder =
5510 CGBuilderTy::InsertPoint IP =
Builder.saveIP();
5511 Builder.SetInsertPoint(Placeholder);
5524 deferPlaceholderReplacement(Placeholder,
Addr.getPointer());
5529 I->Ty,
getContext().getTypeAlignInChars(I->Ty),
5530 "indirect-arg-temp");
5531 I->copyInto(*
this,
Addr);
5540 I->copyInto(*
this,
Addr);
5547 assert(NumIRArgs == 1);
5548 if (I->isAggregate()) {
5558 ? I->getKnownLValue().getAddress()
5559 : I->getKnownRValue().getAggregateAddress();
5561 const llvm::DataLayout *TD = &
CGM.getDataLayout();
5563 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5564 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5565 TD->getAllocaAddrSpace()) &&
5566 "indirect argument must be in alloca address space");
5568 bool NeedCopy =
false;
5569 if (
Addr.getAlignment() < Align &&
5570 llvm::getOrEnforceKnownAlignment(
Addr.emitRawPointer(*
this),
5574 }
else if (I->hasLValue()) {
5575 auto LV = I->getKnownLValue();
5580 if (!isByValOrRef ||
5581 (LV.getAlignment() <
getContext().getTypeAlignInChars(I->Ty))) {
5585 if (isByValOrRef &&
Addr.getType()->getAddressSpace() !=
5594 auto *T = llvm::PointerType::get(
CGM.getLLVMContext(),
5602 if (ArgHasMaybeUndefAttr)
5603 Val =
Builder.CreateFreeze(Val);
5604 IRCallArgs[FirstIRArg] = Val;
5607 }
else if (I->getType()->isArrayParameterType()) {
5613 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5622 if (ArgHasMaybeUndefAttr)
5623 Val =
Builder.CreateFreeze(Val);
5624 IRCallArgs[FirstIRArg] = Val;
5629 CallLifetimeEndAfterCall.emplace_back(AI);
5632 I->copyInto(*
this, AI);
5637 assert(NumIRArgs == 0);
5645 assert(NumIRArgs == 1);
5647 if (!I->isAggregate())
5648 V = I->getKnownRValue().getScalarVal();
5651 I->hasLValue() ? I->getKnownLValue().getAddress()
5652 : I->getKnownRValue().getAggregateAddress());
5658 assert(!swiftErrorTemp.
isValid() &&
"multiple swifterror args");
5662 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
5669 llvm::Value *errorValue =
Builder.CreateLoad(swiftErrorArg);
5670 Builder.CreateStore(errorValue, swiftErrorTemp);
5675 V->getType()->isIntegerTy())
5682 if (FirstIRArg < IRFuncTy->getNumParams() &&
5683 V->getType() != IRFuncTy->getParamType(FirstIRArg)) {
5684 assert(
V->getType()->isPointerTy() &&
"Only pointers can mismatch!");
5688 if (ArgHasMaybeUndefAttr)
5690 IRCallArgs[FirstIRArg] =
V;
5694 llvm::StructType *STy =
5699 if (!I->isAggregate()) {
5701 I->copyInto(*
this, Src);
5703 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5704 : I->getKnownRValue().getAggregateAddress();
5714 llvm::TypeSize SrcTypeSize =
5715 CGM.getDataLayout().getTypeAllocSize(SrcTy);
5716 llvm::TypeSize DstTypeSize =
CGM.getDataLayout().getTypeAllocSize(STy);
5717 if (SrcTypeSize.isScalable()) {
5718 assert(STy->containsHomogeneousScalableVectorTypes() &&
5719 "ABI only supports structure with homogeneous scalable vector "
5721 assert(SrcTypeSize == DstTypeSize &&
5722 "Only allow non-fractional movement of structure with "
5723 "homogeneous scalable vector type");
5724 assert(NumIRArgs == STy->getNumElements());
5726 llvm::Value *StoredStructValue =
5728 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5729 llvm::Value *Extract =
Builder.CreateExtractValue(
5730 StoredStructValue, i, Src.
getName() +
".extract" + Twine(i));
5731 IRCallArgs[FirstIRArg + i] = Extract;
5734 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5735 uint64_t DstSize = DstTypeSize.getFixedValue();
5736 bool HasPFPFields =
getContext().hasPFPFields(I->Ty);
5742 if (HasPFPFields || SrcSize < DstSize) {
5753 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
5759 assert(NumIRArgs == STy->getNumElements());
5760 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5762 llvm::Value *LI =
Builder.CreateLoad(EltPtr);
5763 if (ArgHasMaybeUndefAttr)
5764 LI =
Builder.CreateFreeze(LI);
5765 IRCallArgs[FirstIRArg + i] = LI;
5770 assert(NumIRArgs == 1);
5778 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5783 if (ArgHasMaybeUndefAttr)
5784 Load =
Builder.CreateFreeze(Load);
5785 IRCallArgs[FirstIRArg] = Load;
5793 auto layout =
CGM.getDataLayout().getStructLayout(coercionType);
5795 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
5799 bool NeedLifetimeEnd =
false;
5800 if (I->isAggregate()) {
5801 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5802 : I->getKnownRValue().getAggregateAddress();
5805 RValue RV = I->getKnownRValue();
5809 auto scalarAlign =
CGM.getDataLayout().getPrefTypeAlign(scalarType);
5814 layout->getAlignment(), scalarAlign)),
5816 nullptr, &AllocaAddr);
5824 unsigned IRArgPos = FirstIRArg;
5825 unsigned unpaddedIndex = 0;
5826 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5827 llvm::Type *eltType = coercionType->getElementType(i);
5834 : unpaddedCoercionType,
5836 if (ArgHasMaybeUndefAttr)
5837 elt =
Builder.CreateFreeze(elt);
5838 IRCallArgs[IRArgPos++] = elt;
5840 assert(IRArgPos == FirstIRArg + NumIRArgs);
5842 if (NeedLifetimeEnd)
5848 unsigned IRArgPos = FirstIRArg;
5849 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5850 assert(IRArgPos == FirstIRArg + NumIRArgs);
5856 if (!I->isAggregate()) {
5858 I->copyInto(*
this, Src);
5860 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5861 : I->getKnownRValue().getAggregateAddress();
5867 CGM.getABIInfo().createCoercedLoad(Src, ArgInfo, *
this);
5868 IRCallArgs[FirstIRArg] = Load;
5874 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*
this);
5880 assert(IRFunctionArgs.hasInallocaArg());
5881 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5892 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5893 llvm::Value *Ptr) -> llvm::Function * {
5894 if (!CalleeFT->isVarArg())
5898 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5899 if (CE->getOpcode() == llvm::Instruction::BitCast)
5900 Ptr = CE->getOperand(0);
5903 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5907 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5911 if (OrigFT->isVarArg() ||
5912 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5913 OrigFT->getReturnType() != CalleeFT->getReturnType())
5916 for (
unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5917 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5923 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5925 IRFuncTy = OrigFn->getFunctionType();
5936 for (
unsigned i = 0; i < IRCallArgs.size(); ++i)
5937 LargestVectorWidth = std::max(LargestVectorWidth,
5942 llvm::AttributeList Attrs;
5943 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5948 if (
CallingConv == llvm::CallingConv::X86_VectorCall &&
5950 CGM.Error(Loc,
"__vectorcall calling convention is not currently "
5955 if (FD->hasAttr<StrictFPAttr>())
5957 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5962 if (FD->hasAttr<OptimizeNoneAttr>() &&
getLangOpts().FastMath)
5963 CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(),
5968 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoMerge);
5972 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5977 !
CGM.getTargetCodeGenInfo().wouldInliningViolateFunctionCallABI(
5978 CallerDecl, CalleeDecl))
5980 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5985 Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Convergent);
5994 !(TargetDecl && TargetDecl->
hasAttr<NoInlineAttr>()) &&
5995 !
CGM.getTargetCodeGenInfo().wouldInliningViolateFunctionCallABI(
5996 CallerDecl, CalleeDecl)) {
5998 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
6003 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
6010 CannotThrow =
false;
6019 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
6021 if (
auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
6022 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
6030 if (NeedSRetLifetimeEnd)
6038 if (
SanOpts.has(SanitizerKind::KCFI) &&
6039 !isa_and_nonnull<FunctionDecl>(TargetDecl))
6046 if (FD->hasAttr<StrictFPAttr>())
6048 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
6050 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*
this, TargetDecl);
6051 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
6053 AllocAlignAttrEmitter AllocAlignAttrEmitter(*
this, TargetDecl, CallArgs);
6054 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
6059 CI =
Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
6062 CI =
Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
6066 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
6067 CI->getCalledFunction()->getName().starts_with(
"_Z4sqrt")) {
6072 if (
CGM.getCodeGenOpts().CallGraphSection) {
6076 else if (
const auto *FPT =
6077 Callee.getAbstractInfo().getCalleeFunctionProtoType())
6081 "Cannot find the callee type to generate callee_type metadata.");
6085 CGM.createCalleeTypeMetadataForIcall(CST, *callOrInvoke);
6092 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(
CurFuncDecl)) {
6093 if (
const auto *A = FD->getAttr<CFGuardAttr>()) {
6094 if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
6095 !CI->getCalledFunction())
6101 CI->setAttributes(Attrs);
6102 CI->setCallingConv(
static_cast<llvm::CallingConv::ID
>(
CallingConv));
6106 if (!CI->getType()->isVoidTy())
6107 CI->setName(
"call");
6109 if (
CGM.shouldEmitConvergenceTokens() && CI->isConvergent())
6110 CI = addConvergenceControlToken(CI);
6113 LargestVectorWidth =
6119 if (!CI->getCalledFunction())
6120 PGO->valueProfile(
Builder, llvm::IPVK_IndirectCallTarget, CI, CalleePtr);
6124 if (
CGM.getLangOpts().ObjCAutoRefCount)
6125 AddObjCARCExceptionMetadata(CI);
6128 bool IsPPC =
getTarget().getTriple().isPPC();
6129 bool IsMIPS =
getTarget().getTriple().isMIPS();
6130 bool HasMips16 =
false;
6133 HasMips16 = TargetOpts.
FeatureMap.lookup(
"mips16");
6135 HasMips16 = llvm::is_contained(TargetOpts.
Features,
"+mips16");
6137 if (llvm::CallInst *
Call = dyn_cast<llvm::CallInst>(CI)) {
6138 if (TargetDecl && TargetDecl->
hasAttr<NotTailCalledAttr>())
6139 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
6140 else if (IsMustTail) {
6143 CGM.getDiags().Report(Loc, diag::err_aix_musttail_unsupported);
6146 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 0;
6147 else if (
Call->isIndirectCall())
6148 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 1;
6149 else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
6154 CGM.addUndefinedGlobalForTailCall(
6157 llvm::GlobalValue::LinkageTypes
Linkage =
CGM.getFunctionLinkage(
6159 if (llvm::GlobalValue::isWeakForLinker(
Linkage) ||
6160 llvm::GlobalValue::isDiscardableIfUnused(
Linkage))
6161 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail)
6169 CGM.getDiags().Report(Loc, diag::err_mips_impossible_musttail) << 0;
6170 else if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
6171 CGM.addUndefinedGlobalForTailCall({FD, Loc});
6173 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
6187 bool NeedSrcLoc = TargetDecl->
hasAttr<ErrorAttr>();
6188 if (!NeedSrcLoc &&
CGM.getCodeGenOpts().ShowInliningChain) {
6189 if (
const auto *FD = dyn_cast<FunctionDecl>(TargetDecl))
6190 NeedSrcLoc = FD->isInlined() || FD->hasAttr<AlwaysInlineAttr>() ||
6192 FD->isInAnonymousNamespace();
6196 auto *MD = llvm::ConstantAsMetadata::get(
Line);
6197 CI->setMetadata(
"srcloc", llvm::MDNode::get(
getLLVMContext(), {MD}));
6206 if (CI->doesNotReturn()) {
6207 if (NeedSRetLifetimeEnd)
6211 if (
SanOpts.has(SanitizerKind::Unreachable)) {
6214 if (
auto *F = CI->getCalledFunction())
6215 F->removeFnAttr(llvm::Attribute::NoReturn);
6216 CI->removeFnAttr(llvm::Attribute::NoReturn);
6220 if (
SanOpts.hasOneOf(SanitizerKind::Address |
6221 SanitizerKind::KernelAddress)) {
6223 llvm::IRBuilder<>::InsertPointGuard IPGuard(
Builder);
6225 auto *FnType = llvm::FunctionType::get(
CGM.VoidTy,
false);
6226 llvm::FunctionCallee Fn =
6227 CGM.CreateRuntimeFunction(FnType,
"__asan_handle_no_return");
6233 Builder.ClearInsertionPoint();
6255 if (CI->doesNotThrow())
6258 diag::err_musttail_noexcept_mismatch);
6264 if (Cleanup && Cleanup->isFakeUse()) {
6265 CGBuilderTy::InsertPointGuard IPG(
Builder);
6267 Cleanup->getCleanup()->Emit(*
this, EHScopeStack::Cleanup::Flags());
6268 }
else if (!(Cleanup &&
6269 Cleanup->getCleanup()->isRedundantBeforeReturn())) {
6270 CGM.ErrorUnsupported(
MustTailCall,
"tail call skipping over cleanups");
6273 if (CI->getType()->isVoidTy())
6277 Builder.ClearInsertionPoint();
6283 if (swiftErrorTemp.
isValid()) {
6284 llvm::Value *errorResult =
Builder.CreateLoad(swiftErrorTemp);
6285 Builder.CreateStore(errorResult, swiftErrorArg);
6302 if (IsVirtualFunctionPointerThunk) {
6315 unsigned unpaddedIndex = 0;
6316 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
6317 llvm::Type *eltType = coercionType->getElementType(i);
6321 llvm::Value *elt = CI;
6322 if (requiresExtract)
6323 elt =
Builder.CreateExtractValue(elt, unpaddedIndex++);
6325 assert(unpaddedIndex == 0);
6326 Builder.CreateStore(elt, eltAddr);
6334 if (NeedSRetLifetimeEnd)
6351 llvm::Value *Real =
Builder.CreateExtractValue(CI, 0);
6352 llvm::Value *Imag =
Builder.CreateExtractValue(CI, 1);
6360 llvm::Value *
V = CI;
6361 if (
V->getType() != RetIRTy)
6371 if (
auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
6372 llvm::Value *
V = CI;
6373 if (
auto *ScalableSrcTy =
6374 dyn_cast<llvm::ScalableVectorType>(
V->getType())) {
6375 if (FixedDstTy->getElementType() ==
6376 ScalableSrcTy->getElementType()) {
6377 V =
Builder.CreateExtractVector(FixedDstTy,
V, uint64_t(0),
6387 getContext().getTypeInfoDataSizeInChars(RetTy).Width.getQuantity();
6391 DestIsVolatile =
false;
6392 DestSize =
getContext().getTypeSizeInChars(RetTy).getQuantity();
6402 CI, RetTy, StorePtr,
6416 DestIsVolatile =
false;
6418 CGM.getABIInfo().createCoercedStore(CI, StorePtr, RetAI, DestIsVolatile,
6425 llvm_unreachable(
"Invalid ABI kind for return argument");
6428 llvm_unreachable(
"Unhandled ABIArgInfo::Kind");
6433 if (Ret.isScalar() && TargetDecl) {
6434 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6435 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6441 LifetimeEnd.Emit(*
this, {});
6453 if (CalleeDecl && !CalleeDecl->
hasAttr<NoDebugAttr>() &&
6454 DI->getCallSiteRelatedAttrs() != llvm::DINode::FlagZero) {
6455 CodeGenFunction CalleeCGF(
CGM);
6457 Callee.getAbstractInfo().getCalleeDecl();
6458 CalleeCGF.
CurGD = CalleeGlobalDecl;
6461 DI->EmitFuncDeclForCallSite(
6462 CI, DI->getFunctionType(CalleeDecl, ResTy, Args), CalleeGlobalDecl);
6465 DI->addCallTargetIfVirtual(CalleeDecl, CI);
6491 if (
VE->isMicrosoftABI())
6492 return CGM.getABIInfo().EmitMSVAArg(*
this, VAListAddr, Ty, Slot);
6493 return CGM.getABIInfo().EmitVAArg(*
this, VAListAddr, Ty, Slot);
6498 CGF.disableDebugInfo();
6502 CGF.enableDebugInfo();
static ExtParameterInfoList getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
static CanQualTypeList getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsTargetDefaultMSABI)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from \TargetOpts and \F, and sets the result in \FuncAttr.
static llvm::Value * CreatePFPCoercedLoad(Address Src, QualType SrcFETy, llvm::Type *Ty, CodeGenFunction &CGF)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
SmallVector< CanQualType, 16 > CanQualTypeList
static std::pair< llvm::Value *, bool > CoerceScalableToFixed(CodeGenFunction &CGF, llvm::FixedVectorType *ToTy, llvm::ScalableVectorType *FromTy, llvm::Value *V, StringRef Name="")
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
static llvm::Value * CreateCoercedLoad(Address Src, QualType SrcFETy, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
SmallVector< FunctionProtoType::ExtParameterInfo, 16 > ExtParameterInfoList
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static CanQualTypeList getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
static bool CreatePFPCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst, CodeGenFunction &CGF)
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
static void addNoBuiltinAttributes(mlir::MLIRContext &ctx, mlir::NamedAttrList &attrs, const LangOptions &langOpts, const NoBuiltinAttr *nba=nullptr)
static void addDenormalModeAttrs(llvm::DenormalMode fpDenormalMode, llvm::DenormalMode fp32DenormalMode, mlir::NamedAttrList &attrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static unsigned getNoFPClassTestMask(const LangOptions &langOpts)
Compute the nofpclass mask for FP types based on language options.
static void appendParameterTypes(const CIRGenTypes &cgt, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > fpt)
Adds the formal parameters in FPT to the given prefix.
static const CIRGenFunctionInfo & arrangeFreeFunctionLikeCall(CIRGenTypes &cgt, CIRGenModule &cgm, const CallArgList &args, const FunctionType *fnType)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
#define CC_VLS_CASE(ABI_VLEN)
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static StringRef getTriple(const Command &Job)
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
CanQualType getCanonicalSizeType() const
const TargetInfo & getTargetInfo() const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
std::vector< PFPField > findPFPFields(QualType Ty) const
Returns a list of PFP fields for the given type, including subfields in bases or other fields,...
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
This class is used for builtin types like 'int'.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const
ConstExprIterator const_arg_iterator
Represents a canonical, potentially-qualified type.
static CanQual< Type > CreateUnsafe(QualType Other)
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ TargetSpecific
TargetSpecific - Some argument types are passed as target specific types such as RISC-V's tuple type,...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
unsigned getAddressSpace() const
Return the address space that this address resides in.
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
llvm::StringRef getName() const
Return the IR name of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
const ArgInfo * const_arg_iterator
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr)
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
An abstract representation of regular/ObjC call/message targets.
const ParmVarDecl * getParamDecl(unsigned I) const
const Decl * getDecl() const
unsigned getNumParams() const
bool hasFunctionDecl() const
An object to manage conditionally-evaluated expressions.
void begin(CodeGenFunction &CGF)
void end(CodeGenFunction &CGF)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
Emits a call or invoke to the given noreturn runtime function.
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
void callCStructDestructor(LValue Dst)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom)
See CGDebugInfo::addInstToSpecificSourceAtom.
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
const CodeGen::CGBlockInfo * BlockInfo
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void CreateCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to Dst from Src, coercing between the source and destination types as needed.
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetInfo & getTarget() const
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
void EmitWritebacks(const CallArgList &Args)
EmitWriteback - Emit callbacks for function.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
llvm::BasicBlock * getInvokeDest()
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
CGDebugInfo * getDebugInfo()
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
void EmitLifetimeEnd(llvm::Value *Addr)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment, without casting it to the default address space.
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitAddressOfPFPField(Address RecordPtr, const PFPField &Field)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
llvm::Type * ConvertTypeForMem(QualType T)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc, uint64_t RetKeyInstructionsSourceAtom)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment, and cast it to the default address space if necessary.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CallExpr * MustTailCall
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
This class organizes the cross-function state that is used while generating LLVM code.
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
ObjCEntrypoints & getObjCEntrypoints() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type Ty into its constituent LLVM argument types, writing them out through the iterator TI.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeDeviceKernelCallerDeclaration(QualType resultType, const FunctionArgList &args)
A device kernel caller function is an offload device entry point function with a target device depend...
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeFunctionDeclaration(const GlobalDecl GD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
A cleanup scope which generates the cleanup blocks lazily.
A saved depth on the scope stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
static void initPointerAuthFnAttributes(const PointerAuthOptions &Opts, llvm::AttrBuilder &FuncAttrs)
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
const FunctionType * getFunctionType(bool BlocksToo=true) const
Looks through the Decl's underlying type to extract a FunctionType when possible.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
bool isZeroLengthBitField() const
Is this a zero-length bit-field?
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
@ SME_AgnosticZAStateMask
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
KernelReferenceKind getKernelReferenceKind() const
CXXDtorType getDtorType() const
const Decl * getDecl() const
This class represents temporary values used to represent inout and out arguments in HLSL.
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
bool isNull() const
Return true if this QualType doesn't point to a type yet.
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
field_iterator field_end() const
bool isParamDestroyedInCallee() const
field_iterator field_begin() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
llvm::StringMap< bool > FeatureMap
The map of which features have been enabled or disabled based on the command line.
bool isIncompleteArrayType() const
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
RecordDecl * castAsRecordDecl() const
bool isMemberPointerType() const
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
const T * getAs() const
Member-template getAs<specific type>'.
bool isNullPtrType() const
bool isRecordType() const
bool isObjCRetainableType() const
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool This(InterpState &S, CodePtr OpPC)
@ Address
A pointer to a ValueDecl.
PRESERVE_NONE bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isa(CodeGen::Address addr)
static bool classof(const OMPClause *T)
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_VectorDeleting
Vector deleting dtor.
@ Dtor_Complete
Complete object dtor.
@ Dtor_Deleting
Deleting dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
U cast(CodeGen::Address addr)
@ Struct
The "struct" keyword introduces the elaborated-type-specifier.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
const Expr * WritebackExpr
An Expression (optional) that performs the writeback with any required casting.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::PointerType * VoidPtrTy
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
~DisableDebugLocationUpdates()
DisableDebugLocationUpdates(CodeGenFunction &CGF)
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.