36#include "llvm/ABI/FunctionInfo.h"
37#include "llvm/ABI/IRTypeMapper.h"
38#include "llvm/ABI/TargetInfo.h"
39#include "llvm/ABI/Types.h"
40#include "llvm/ADT/STLExtras.h"
41#include "llvm/ADT/StringExtras.h"
42#include "llvm/Analysis/ValueTracking.h"
43#include "llvm/IR/Assumptions.h"
44#include "llvm/IR/AttributeMask.h"
45#include "llvm/IR/Attributes.h"
46#include "llvm/IR/CallingConv.h"
47#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/DebugInfoMetadata.h"
49#include "llvm/IR/InlineAsm.h"
50#include "llvm/IR/IntrinsicInst.h"
51#include "llvm/IR/Intrinsics.h"
52#include "llvm/IR/Type.h"
53#include "llvm/Transforms/Utils/Local.h"
63 return llvm::CallingConv::C;
65 return llvm::CallingConv::X86_StdCall;
67 return llvm::CallingConv::X86_FastCall;
69 return llvm::CallingConv::X86_RegCall;
71 return llvm::CallingConv::X86_ThisCall;
73 return llvm::CallingConv::Win64;
75 return llvm::CallingConv::X86_64_SysV;
77 return llvm::CallingConv::ARM_AAPCS;
79 return llvm::CallingConv::ARM_AAPCS_VFP;
81 return llvm::CallingConv::Intel_OCL_BI;
84 return llvm::CallingConv::C;
87 return llvm::CallingConv::X86_VectorCall;
89 return llvm::CallingConv::AArch64_VectorCall;
91 return llvm::CallingConv::AArch64_SVE_VectorCall;
93 return llvm::CallingConv::SPIR_FUNC;
95 return CGM.getTargetCodeGenInfo().getDeviceKernelCallingConv();
97 return llvm::CallingConv::PreserveMost;
99 return llvm::CallingConv::PreserveAll;
101 return llvm::CallingConv::Swift;
103 return llvm::CallingConv::SwiftTail;
105 return llvm::CallingConv::M68k_RTD;
107 return llvm::CallingConv::PreserveNone;
111#define CC_VLS_CASE(ABI_VLEN) \
112 case CC_RISCVVLSCall_##ABI_VLEN: \
113 return llvm::CallingConv::RISCV_VLSCall_##ABI_VLEN;
138 RecTy = Context.getCanonicalTagType(RD);
140 RecTy = Context.VoidTy;
145 return Context.getPointerType(RecTy);
178 assert(paramInfos.size() <= prefixArgs);
179 assert(proto->
getNumParams() + prefixArgs <= totalArgs);
181 paramInfos.reserve(totalArgs);
184 paramInfos.resize(prefixArgs);
188 paramInfos.push_back(ParamInfo);
190 if (ParamInfo.hasPassObjectSize())
191 paramInfos.emplace_back();
194 assert(paramInfos.size() <= totalArgs &&
195 "Did we forget to insert pass_object_size args?");
197 paramInfos.resize(totalArgs);
207 if (!FPT->hasExtParameterInfos()) {
208 assert(paramInfos.empty() &&
209 "We have paramInfos, but the prototype doesn't?");
210 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
214 unsigned PrefixSize = prefix.size();
218 prefix.reserve(prefix.size() + FPT->getNumParams());
220 auto ExtInfos = FPT->getExtParameterInfos();
221 assert(ExtInfos.size() == FPT->getNumParams());
222 for (
unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
223 prefix.push_back(FPT->getParamType(I));
224 if (ExtInfos[I].hasPassObjectSize())
249 FTP->getExtInfo(), paramInfos,
Required);
259 return ::arrangeLLVMFunctionInfo(*
this,
false, argTypes,
264 bool IsTargetDefaultMSABI) {
269 if (D->
hasAttr<FastCallAttr>())
275 if (D->
hasAttr<ThisCallAttr>())
278 if (D->
hasAttr<VectorCallAttr>())
284 if (PcsAttr *PCS = D->
getAttr<PcsAttr>())
287 if (D->
hasAttr<AArch64VectorPcsAttr>())
290 if (D->
hasAttr<AArch64SVEPcsAttr>())
293 if (D->
hasAttr<DeviceKernelAttr>())
296 if (D->
hasAttr<IntelOclBiccAttr>())
305 if (D->
hasAttr<PreserveMostAttr>())
308 if (D->
hasAttr<PreserveAllAttr>())
314 if (D->
hasAttr<PreserveNoneAttr>())
317 if (D->
hasAttr<RISCVVectorCCAttr>())
320 if (RISCVVLSCCAttr *PCS = D->
getAttr<RISCVVLSCCAttr>()) {
321 switch (PCS->getVectorWidth()) {
323 llvm_unreachable(
"Invalid RISC-V VLS ABI VLEN");
324#define CC_VLS_CASE(ABI_VLEN) \
326 return CC_RISCVVLSCall_##ABI_VLEN;
361 return ::arrangeLLVMFunctionInfo(
362 *
this,
true, argTypes,
369 if (FD->
hasAttr<CUDAGlobalAttr>()) {
405 !Target.getCXXABI().hasConstructorVariants();
418 bool PassParams =
true;
420 if (
auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
423 if (
auto Inherited = CD->getInheritedConstructor())
435 if (!paramInfos.empty()) {
438 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.
Prefix,
441 paramInfos.append(AddedArgs.
Suffix,
446 (PassParams && MD->isVariadic() ?
RequiredArgs(argTypes.size())
452 ? CGM.getContext().VoidPtrTy
455 argTypes, extInfo, paramInfos, required);
461 for (
auto &arg : args)
469 for (
auto &arg : args)
476 unsigned totalArgs) {
494 unsigned ExtraPrefixArgs,
unsigned ExtraSuffixArgs,
bool PassProtoArgs) {
496 for (
const auto &Arg : args)
497 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
500 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
505 FPT, TotalPrefixArgs + ExtraSuffixArgs)
511 ? CGM.getContext().VoidPtrTy
518 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
525 ArgTypes, Info, ParamInfos,
Required);
534 if (MD->isImplicitObjectMemberFunction())
542 if (DeviceKernelAttr::isOpenCLSpelling(FD->
getAttr<DeviceKernelAttr>()) &&
545 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
553 {}, noProto->getExtInfo(), {},
580 argTys.push_back(Context.getCanonicalParamType(receiverType));
582 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
584 argTys.push_back(Context.getCanonicalParamType(I->getType()));
586 I->hasAttr<NoEscapeAttr>());
587 extParamInfos.push_back(extParamInfo);
591 bool IsTargetDefaultMSABI =
597 if (
getContext().getLangOpts().ObjCAutoRefCount &&
598 MD->
hasAttr<NSReturnsRetainedAttr>())
635 assert(MD->
isVirtual() &&
"only methods have thunks");
652 ArgTys.push_back(*FTP->param_type_begin());
654 ArgTys.push_back(Context.IntTy);
655 CallingConv CC = Context.getDefaultCallingConvention(
667 unsigned numExtraRequiredArgs,
bool chainCall) {
668 assert(args.size() >= numExtraRequiredArgs);
678 if (proto->isVariadic())
681 if (proto->hasExtParameterInfos())
695 for (
const auto &arg : args)
700 paramInfos, required);
710 chainCall ? 1 : 0, chainCall);
739 for (
const auto &Arg : args)
740 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
780 assert(numPrefixArgs + 1 <= args.size() &&
781 "Emitting a call with less args than the required prefix?");
792 paramInfos, required);
803 assert(signature.
arg_size() <= args.size());
804 if (signature.
arg_size() == args.size())
809 if (!sigParamInfos.empty()) {
810 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
811 paramInfos.resize(args.size());
837 MappedArgTypes.reserve(FI.
arg_size());
839 MappedArgTypes.push_back(AbiMapper->convertType(Arg.type));
841 std::optional<unsigned> NumRequired;
844 NumRequired =
Required.getNumRequiredArgs();
846 auto AbiFI = llvm::abi::FunctionInfo::create(
848 MappedArgTypes, NumRequired);
853 convertABIArgInfo(AbiFI->getReturnInfo(), FI.
getReturnType());
855 for (
auto [CGArg, AbiArg] :
856 llvm::zip_equal(FI.
arguments(), AbiFI->arguments()))
857 CGArg.info = convertABIArgInfo(AbiArg.Info, CGArg.type);
860ABIArgInfo CodeGenModule::convertABIArgInfo(
const llvm::abi::ArgInfo &AbiInfo,
862 switch (AbiInfo.getKind()) {
863 case llvm::abi::ArgInfo::Direct: {
864 llvm::Type *CoercedType =
nullptr;
865 if (AbiInfo.getCoerceToType())
866 CoercedType = AbiReverseMapper->convertType(AbiInfo.getCoerceToType());
868 CoercedType = getTypes().ConvertType(
Type);
871 case llvm::abi::ArgInfo::Extend: {
872 llvm::Type *CoercedType =
nullptr;
873 if (AbiInfo.getCoerceToType())
874 CoercedType = AbiReverseMapper->convertType(AbiInfo.getCoerceToType());
876 CoercedType = getTypes().ConvertType(
Type);
877 if (AbiInfo.isSignExt())
879 if (AbiInfo.isZeroExt())
883 case llvm::abi::ArgInfo::Indirect: {
887 AbiInfo.getIndirectByVal(),
888 AbiInfo.getIndirectRealign());
890 case llvm::abi::ArgInfo::Ignore:
893 llvm_unreachable(
"Unexpected llvm::abi::ArgInfo kind");
904 assert(llvm::all_of(argTypes,
905 [](
CanQualType T) {
return T.isCanonicalAsParam(); }));
908 llvm::FoldingSetNodeID ID;
913 bool isDelegateCall =
916 info, paramInfos, required, resultType, argTypes);
918 void *insertPos =
nullptr;
919 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
927 info, paramInfos, resultType, argTypes, required);
928 FunctionInfos.InsertNode(FI, insertPos);
930 bool inserted = FunctionsBeingProcessed.insert(FI).second;
932 assert(inserted &&
"Recursively being processed?");
936 (CC == llvm::CallingConv::SPIR_KERNEL || CC == llvm::CallingConv::C)) {
945 }
else if (CGM.shouldUseLLVMABILowering()) {
946 CGM.computeABIInfoUsingLib(*FI);
948 CGM.getABIInfo().computeInfo(*FI);
959 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() ==
nullptr)
962 bool erased = FunctionsBeingProcessed.erase(FI);
964 assert(erased &&
"Not in set?");
970 bool chainCall,
bool delegateCall,
976 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
980 void *buffer =
operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
981 argTypes.size() + 1, paramInfos.size()));
983 CGFunctionInfo *FI =
new (buffer) CGFunctionInfo();
984 FI->CallingConvention = llvmCC;
985 FI->EffectiveCallingConvention = llvmCC;
986 FI->ASTCallingConvention = info.
getCC();
987 FI->InstanceMethod = instanceMethod;
988 FI->ChainCall = chainCall;
989 FI->DelegateCall = delegateCall;
995 FI->Required = required;
998 FI->ArgStruct =
nullptr;
999 FI->ArgStructAlign = 0;
1000 FI->NumArgs = argTypes.size();
1001 FI->HasExtParameterInfos = !paramInfos.empty();
1002 FI->getArgsBuffer()[0].
type = resultType;
1003 FI->MaxVectorWidth = 0;
1004 for (
unsigned i = 0, e = argTypes.size(); i != e; ++i)
1005 FI->getArgsBuffer()[i + 1].
type = argTypes[i];
1006 for (
unsigned i = 0, e = paramInfos.size(); i != e; ++i)
1007 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
1017struct TypeExpansion {
1018 enum TypeExpansionKind {
1030 const TypeExpansionKind Kind;
1032 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
1033 virtual ~TypeExpansion() {}
1036struct ConstantArrayExpansion : TypeExpansion {
1040 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
1041 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
1042 static bool classof(
const TypeExpansion *TE) {
1043 return TE->Kind == TEK_ConstantArray;
1047struct RecordExpansion : TypeExpansion {
1048 SmallVector<const CXXBaseSpecifier *, 1> Bases;
1050 SmallVector<const FieldDecl *, 1> Fields;
1052 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
1053 SmallVector<const FieldDecl *, 1> &&Fields)
1054 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
1055 Fields(std::move(Fields)) {}
1056 static bool classof(
const TypeExpansion *TE) {
1057 return TE->Kind == TEK_Record;
1061struct ComplexExpansion : TypeExpansion {
1064 ComplexExpansion(QualType EltTy) : TypeExpansion(
TEK_Complex), EltTy(EltTy) {}
1065 static bool classof(
const TypeExpansion *TE) {
1070struct NoExpansion : TypeExpansion {
1071 NoExpansion() : TypeExpansion(TEK_None) {}
1072 static bool classof(
const TypeExpansion *TE) {
return TE->Kind == TEK_None; }
1076static std::unique_ptr<TypeExpansion>
1079 return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
1085 assert(!RD->hasFlexibleArrayMember() &&
1086 "Cannot expand structure with flexible array.");
1087 if (RD->isUnion()) {
1093 for (
const auto *FD : RD->fields()) {
1094 if (FD->isZeroLengthBitField())
1096 assert(!FD->isBitField() &&
1097 "Cannot expand structure with bit-field members.");
1098 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
1099 if (UnionSize < FieldSize) {
1100 UnionSize = FieldSize;
1105 Fields.push_back(LargestFD);
1107 if (
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1108 assert(!CXXRD->isDynamicClass() &&
1109 "cannot expand vtable pointers in dynamic classes");
1110 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
1113 for (
const auto *FD : RD->fields()) {
1114 if (FD->isZeroLengthBitField())
1116 assert(!FD->isBitField() &&
1117 "Cannot expand structure with bit-field members.");
1118 Fields.push_back(FD);
1121 return std::make_unique<RecordExpansion>(std::move(Bases),
1125 return std::make_unique<ComplexExpansion>(CT->getElementType());
1127 return std::make_unique<NoExpansion>();
1132 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1135 if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1137 for (
auto BS : RExp->Bases)
1139 for (
auto FD : RExp->Fields)
1152 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1153 for (
int i = 0, n = CAExp->NumElts; i < n; i++) {
1156 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1157 for (
auto BS : RExp->Bases)
1159 for (
auto FD : RExp->Fields)
1161 }
else if (
auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1172 ConstantArrayExpansion *CAE,
1174 llvm::function_ref<
void(
Address)> Fn) {
1175 for (
int i = 0, n = CAE->NumElts; i < n; i++) {
1181void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1182 llvm::Function::arg_iterator &AI) {
1183 assert(LV.isSimple() &&
1184 "Unexpected non-simple lvalue during struct expansion.");
1187 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1189 *
this, CAExp, LV.getAddress(), [&](Address EltAddr) {
1190 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1191 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1193 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1195 for (
const CXXBaseSpecifier *BS : RExp->Bases) {
1199 false, SourceLocation());
1200 LValue SubLV = MakeAddrLValue(Base, BS->
getType());
1203 ExpandTypeFromArgs(BS->
getType(), SubLV, AI);
1205 for (
auto FD : RExp->Fields) {
1207 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1208 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1211 auto realValue = &*AI++;
1212 auto imagValue = &*AI++;
1213 EmitStoreOfComplex(
ComplexPairTy(realValue, imagValue), LV,
true);
1218 llvm::Value *Arg = &*AI++;
1219 if (LV.isBitField()) {
1225 if (Arg->getType()->isPointerTy()) {
1227 Arg = Builder.CreateBitCast(Arg,
Addr.getElementType());
1229 EmitStoreOfScalar(Arg, LV);
1234void CodeGenFunction::ExpandTypeToArgs(
1235 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1236 SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos) {
1238 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1243 CallArg(convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1245 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1248 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1251 for (
const CXXBaseSpecifier *BS : RExp->Bases) {
1255 false, SourceLocation());
1259 ExpandTypeToArgs(BS->
getType(), BaseArg, IRFuncTy, IRCallArgs,
1263 LValue LV = MakeAddrLValue(This, Ty);
1264 for (
auto FD : RExp->Fields) {
1266 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1267 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1272 IRCallArgs[IRCallArgPos++] = CV.first;
1273 IRCallArgs[IRCallArgPos++] = CV.second;
1277 assert(RV.isScalar() &&
1278 "Unexpected non-scalar rvalue during struct expansion.");
1281 llvm::Value *
V = RV.getScalarVal();
1282 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1283 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1284 V = Builder.CreateBitCast(
V, IRFuncTy->getParamType(IRCallArgPos));
1286 IRCallArgs[IRCallArgPos++] =
V;
1294 const Twine &Name =
"tmp") {
1307 llvm::StructType *SrcSTy,
1311 if (SrcSTy->getNumElements() == 0)
1320 uint64_t FirstEltSize = CGF.
CGM.
getDataLayout().getTypeStoreSize(FirstElt);
1321 if (FirstEltSize < DstSize &&
1330 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1345 if (Val->getType() == Ty)
1351 return CGF.
Builder.CreateBitCast(Val, Ty,
"coerce.val");
1357 llvm::Type *DestIntTy = Ty;
1361 if (Val->getType() != DestIntTy) {
1363 if (DL.isBigEndian()) {
1366 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1367 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1369 if (SrcSize > DstSize) {
1370 Val = CGF.
Builder.CreateLShr(Val, SrcSize - DstSize,
"coerce.highbits");
1371 Val = CGF.
Builder.CreateTrunc(Val, DestIntTy,
"coerce.val.ii");
1373 Val = CGF.
Builder.CreateZExt(Val, DestIntTy,
"coerce.val.ii");
1374 Val = CGF.
Builder.CreateShl(Val, DstSize - SrcSize,
"coerce.highbits");
1378 Val = CGF.
Builder.CreateIntCast(Val, DestIntTy,
false,
"coerce.val.ii");
1383 Val = CGF.
Builder.CreateIntToPtr(Val, Ty,
"coerce.val.ip");
1390 if (PFPFields.empty())
1393 auto LoadCoercedField = [&](
CharUnits Offset,
1394 llvm::Type *FieldType) -> llvm::Value * {
1399 if (!PFPFields.empty() && PFPFields[0].Offset == Offset) {
1403 FieldVal = CGF.
Builder.CreatePtrToInt(FieldVal, FieldType);
1404 PFPFields.erase(PFPFields.begin());
1421 Val = CGF.
Builder.CreatePtrToInt(Val, Ty);
1425 auto *ET = AT->getElementType();
1429 llvm::Value *Val = llvm::PoisonValue::get(AT);
1430 for (
unsigned Idx = 0; Idx != AT->getNumElements(); ++Idx, Offset += WordSize)
1431 Val = CGF.
Builder.CreateInsertValue(Val, LoadCoercedField(Offset, ET), Idx);
1455 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1457 DstSize.getFixedValue(), CGF);
1472 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1473 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1487 if (
auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1488 if (
auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1491 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
1492 FixedSrcTy->getElementType()->isIntegerTy(8)) {
1493 ScalableDstTy = llvm::ScalableVectorType::get(
1494 FixedSrcTy->getElementType(),
1496 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
1498 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
1500 auto *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
1502 ScalableDstTy, PoisonVec, Load, uint64_t(0),
"cast.scalable");
1504 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, Ty));
1505 if (
Result->getType() != ScalableDstTy)
1507 if (
Result->getType() != Ty)
1520 llvm::ConstantInt::get(CGF.
IntPtrTy, SrcSize.getKnownMinValue()));
1527 if (PFPFields.empty())
1530 llvm::Type *SrcTy = Src->getType();
1531 auto StoreCoercedField = [&](
CharUnits Offset, llvm::Value *FieldVal) {
1532 if (!PFPFields.empty() && PFPFields[0].Offset == Offset) {
1537 PFPFields.erase(PFPFields.begin());
1557 auto *ET = AT->getElementType();
1561 for (
unsigned i = 0; i != AT->getNumElements(); ++i, Offset += WordSize)
1562 StoreCoercedField(Offset, CGF.
Builder.CreateExtractValue(Src, i));
1568 Address Dst, llvm::TypeSize DstSize,
1569 bool DstIsVolatile) {
1573 llvm::Type *SrcTy = Src->getType();
1574 llvm::TypeSize SrcSize =
CGM.getDataLayout().getTypeAllocSize(SrcTy);
1580 if (llvm::StructType *DstSTy =
1582 assert(!SrcSize.isScalable());
1584 SrcSize.getFixedValue(), *
this);
1591 if (SrcSize.isScalable() || SrcSize <= DstSize) {
1592 if (SrcTy->isIntegerTy() && Dst.
getElementType()->isPointerTy() &&
1596 auto *I =
Builder.CreateStore(Src, Dst, DstIsVolatile);
1598 }
else if (llvm::StructType *STy =
1599 dyn_cast<llvm::StructType>(Src->getType())) {
1602 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1604 llvm::Value *Elt =
Builder.CreateExtractValue(Src, i);
1605 auto *I =
Builder.CreateStore(Elt, EltPtr, DstIsVolatile);
1613 }
else if (SrcTy->isIntegerTy()) {
1615 llvm::Type *DstIntTy =
Builder.getIntNTy(DstSize.getFixedValue() * 8);
1632 Builder.CreateStore(Src, Tmp);
1633 auto *I =
Builder.CreateMemCpy(
1652static std::pair<llvm::Value *, bool>
1654 llvm::ScalableVectorType *FromTy, llvm::Value *
V,
1655 StringRef Name =
"") {
1658 if (FromTy->getElementType()->isIntegerTy(1) &&
1659 ToTy->getElementType() == CGF.
Builder.getInt8Ty()) {
1660 if (!FromTy->getElementCount().isKnownMultipleOf(8)) {
1661 FromTy = llvm::ScalableVectorType::get(
1662 FromTy->getElementType(),
1663 llvm::alignTo<8>(FromTy->getElementCount().getKnownMinValue()));
1664 llvm::Value *ZeroVec = llvm::Constant::getNullValue(FromTy);
1665 V = CGF.
Builder.CreateInsertVector(FromTy, ZeroVec,
V, uint64_t(0));
1667 FromTy = llvm::ScalableVectorType::get(
1668 ToTy->getElementType(),
1669 FromTy->getElementCount().getKnownMinValue() / 8);
1670 V = CGF.
Builder.CreateBitCast(
V, FromTy);
1672 if (FromTy->getElementType() == ToTy->getElementType()) {
1673 V->setName(Name +
".coerce");
1674 V = CGF.
Builder.CreateExtractVector(ToTy,
V, uint64_t(0),
"cast.fixed");
1684class ClangToLLVMArgMapping {
1685 static const unsigned InvalidIndex = ~0U;
1686 unsigned InallocaArgNo;
1688 unsigned TotalIRArgs;
1692 unsigned PaddingArgIndex;
1695 unsigned FirstArgIndex;
1696 unsigned NumberOfArgs;
1699 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1703 SmallVector<IRArgs, 8> ArgInfo;
1706 ClangToLLVMArgMapping(
const ASTContext &Context,
const CGFunctionInfo &FI,
1707 bool OnlyRequiredArgs =
false)
1708 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1709 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1710 construct(Context, FI, OnlyRequiredArgs);
1713 bool hasInallocaArg()
const {
return InallocaArgNo != InvalidIndex; }
1714 unsigned getInallocaArgNo()
const {
1715 assert(hasInallocaArg());
1716 return InallocaArgNo;
1719 bool hasSRetArg()
const {
return SRetArgNo != InvalidIndex; }
1720 unsigned getSRetArgNo()
const {
1721 assert(hasSRetArg());
1725 unsigned totalIRArgs()
const {
return TotalIRArgs; }
1727 bool hasPaddingArg(
unsigned ArgNo)
const {
1728 assert(ArgNo < ArgInfo.size());
1729 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1731 unsigned getPaddingArgNo(
unsigned ArgNo)
const {
1732 assert(hasPaddingArg(ArgNo));
1733 return ArgInfo[ArgNo].PaddingArgIndex;
1738 std::pair<unsigned, unsigned> getIRArgs(
unsigned ArgNo)
const {
1739 assert(ArgNo < ArgInfo.size());
1740 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1741 ArgInfo[ArgNo].NumberOfArgs);
1745 void construct(
const ASTContext &Context,
const CGFunctionInfo &FI,
1746 bool OnlyRequiredArgs);
1749void ClangToLLVMArgMapping::construct(
const ASTContext &Context,
1750 const CGFunctionInfo &FI,
1751 bool OnlyRequiredArgs) {
1752 unsigned IRArgNo = 0;
1753 bool SwapThisWithSRet =
false;
1758 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1766 QualType ArgType = I->type;
1767 const ABIArgInfo &AI = I->info;
1769 auto &IRArgs = ArgInfo[ArgNo];
1772 IRArgs.PaddingArgIndex = IRArgNo++;
1779 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.
getCoerceToType());
1781 IRArgs.NumberOfArgs = STy->getNumElements();
1783 IRArgs.NumberOfArgs = 1;
1789 IRArgs.NumberOfArgs = 1;
1794 IRArgs.NumberOfArgs = 0;
1804 if (IRArgs.NumberOfArgs > 0) {
1805 IRArgs.FirstArgIndex = IRArgNo;
1806 IRArgNo += IRArgs.NumberOfArgs;
1811 if (IRArgNo == 1 && SwapThisWithSRet)
1814 assert(ArgNo == ArgInfo.size());
1817 InallocaArgNo = IRArgNo++;
1819 TotalIRArgs = IRArgNo;
1827 return RI.
isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1842 switch (BT->getKind()) {
1845 case BuiltinType::Float:
1847 case BuiltinType::Double:
1849 case BuiltinType::LongDouble:
1860 if (BT->getKind() == BuiltinType::LongDouble)
1861 return getTarget().useObjCFP2RetForComplexLongDouble();
1875 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1877 assert(Inserted &&
"Recursively being processed?");
1879 llvm::Type *resultType =
nullptr;
1884 llvm_unreachable(
"Invalid ABI kind for return argument");
1896 unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
1897 resultType = llvm::PointerType::get(
getLLVMContext(), addressSpace);
1913 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI,
true);
1917 if (IRFunctionArgs.hasSRetArg()) {
1918 ArgTypes[IRFunctionArgs.getSRetArgNo()] = llvm::PointerType::get(
1923 if (IRFunctionArgs.hasInallocaArg())
1924 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1931 for (; it != ie; ++it, ++ArgNo) {
1935 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1936 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1939 unsigned FirstIRArg, NumIRArgs;
1940 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1945 assert(NumIRArgs == 0);
1949 assert(NumIRArgs == 1);
1951 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1955 assert(NumIRArgs == 1);
1956 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1965 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1967 assert(NumIRArgs == st->getNumElements());
1968 for (
unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1969 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1971 assert(NumIRArgs == 1);
1972 ArgTypes[FirstIRArg] = argType;
1978 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1980 *ArgTypesIter++ = EltTy;
1982 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1987 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1989 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1994 bool Erased = FunctionsBeingProcessed.erase(&FI);
1996 assert(Erased &&
"Not in set?");
1998 return llvm::FunctionType::get(resultType, ArgTypes, FI.
isVariadic());
2012 llvm::AttrBuilder &FuncAttrs,
2019 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2023 FuncAttrs.addAttribute(
"aarch64_pstate_sm_enabled");
2025 FuncAttrs.addAttribute(
"aarch64_pstate_sm_compatible");
2027 FuncAttrs.addAttribute(
"aarch64_za_state_agnostic");
2031 FuncAttrs.addAttribute(
"aarch64_preserves_za");
2033 FuncAttrs.addAttribute(
"aarch64_in_za");
2035 FuncAttrs.addAttribute(
"aarch64_out_za");
2037 FuncAttrs.addAttribute(
"aarch64_inout_za");
2041 FuncAttrs.addAttribute(
"aarch64_preserves_zt0");
2043 FuncAttrs.addAttribute(
"aarch64_in_zt0");
2045 FuncAttrs.addAttribute(
"aarch64_out_zt0");
2047 FuncAttrs.addAttribute(
"aarch64_inout_zt0");
2051 const Decl *Callee) {
2057 for (
const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
2058 AA->getAssumption().split(Attrs,
",");
2061 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
2062 llvm::join(Attrs.begin(), Attrs.end(),
","));
2069 if (
const RecordType *RT =
2071 if (
const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
2072 return ClassDecl->hasTrivialDestructor();
2078 const Decl *TargetDecl) {
2084 if (
Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
2088 if (!
Module.getLangOpts().CPlusPlus)
2091 if (
const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
2092 if (FDecl->isExternC())
2094 }
else if (
const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
2096 if (VDecl->isExternC())
2104 return Module.getCodeGenOpts().StrictReturn ||
2105 !
Module.MayDropFunctionReturn(
Module.getContext(), RetTy) ||
2106 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
2113 llvm::DenormalMode FP32DenormalMode,
2114 llvm::AttrBuilder &FuncAttrs) {
2115 llvm::DenormalFPEnv FPEnv(FPDenormalMode, FP32DenormalMode);
2116 if (FPEnv != llvm::DenormalFPEnv::getDefault())
2117 FuncAttrs.addDenormalFPEnvAttr(FPEnv);
2125 llvm::AttrBuilder &FuncAttrs) {
2131 StringRef Name,
bool HasOptnone,
const CodeGenOptions &CodeGenOpts,
2133 llvm::AttrBuilder &FuncAttrs) {
2136 if (CodeGenOpts.OptimizeSize)
2137 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
2138 if (CodeGenOpts.OptimizeSize == 2)
2139 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
2142 if (CodeGenOpts.DisableRedZone)
2143 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
2144 if (CodeGenOpts.IndirectTlsSegRefs)
2145 FuncAttrs.addAttribute(
"indirect-tls-seg-refs");
2146 if (CodeGenOpts.NoImplicitFloat)
2147 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
2149 if (AttrOnCallSite) {
2154 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
2156 FuncAttrs.addAttribute(
"trap-func-name", CodeGenOpts.
TrapFuncName);
2158 switch (CodeGenOpts.getFramePointer()) {
2166 FuncAttrs.addAttribute(
"frame-pointer",
2168 CodeGenOpts.getFramePointer()));
2171 if (CodeGenOpts.LessPreciseFPMAD)
2172 FuncAttrs.addAttribute(
"less-precise-fpmad",
"true");
2174 if (CodeGenOpts.NullPointerIsValid)
2175 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
2178 FuncAttrs.addAttribute(
"no-trapping-math",
"true");
2182 if (CodeGenOpts.SoftFloat)
2183 FuncAttrs.addAttribute(
"use-soft-float",
"true");
2184 FuncAttrs.addAttribute(
"stack-protector-buffer-size",
2185 llvm::utostr(CodeGenOpts.SSPBufferSize));
2186 if (LangOpts.NoSignedZero)
2187 FuncAttrs.addAttribute(
"no-signed-zeros-fp-math",
"true");
2190 const std::vector<std::string> &Recips = CodeGenOpts.
Reciprocals;
2191 if (!Recips.empty())
2192 FuncAttrs.addAttribute(
"reciprocal-estimates", llvm::join(Recips,
","));
2196 FuncAttrs.addAttribute(
"prefer-vector-width",
2199 if (CodeGenOpts.StackRealignment)
2200 FuncAttrs.addAttribute(
"stackrealign");
2201 if (CodeGenOpts.Backchain)
2202 FuncAttrs.addAttribute(
"backchain");
2203 if (CodeGenOpts.EnableSegmentedStacks)
2204 FuncAttrs.addAttribute(
"split-stack");
2206 if (CodeGenOpts.SpeculativeLoadHardening)
2207 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2210 switch (CodeGenOpts.getZeroCallUsedRegs()) {
2211 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
2212 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2214 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
2215 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr-arg");
2217 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
2218 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr");
2220 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
2221 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-arg");
2223 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
2224 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used");
2226 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
2227 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr-arg");
2229 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
2230 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr");
2232 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
2233 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-arg");
2235 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
2236 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all");
2247 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2252 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
2253 LangOpts.SYCLIsDevice) {
2254 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2257 if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
2258 FuncAttrs.addAttribute(
"save-reg-params");
2261 StringRef Var,
Value;
2263 FuncAttrs.addAttribute(Var,
Value);
2277 const llvm::Function &F,
2279 auto FFeatures = F.getFnAttribute(
"target-features");
2281 llvm::StringSet<> MergedNames;
2283 MergedFeatures.reserve(TargetOpts.
Features.size());
2285 auto AddUnmergedFeatures = [&](
auto &&FeatureRange) {
2286 for (StringRef
Feature : FeatureRange) {
2290 StringRef Name =
Feature.drop_front(1);
2291 bool Merged = !MergedNames.insert(Name).second;
2293 MergedFeatures.push_back(
Feature);
2297 if (FFeatures.isValid())
2298 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(),
','));
2299 AddUnmergedFeatures(TargetOpts.
Features);
2301 if (!MergedFeatures.empty()) {
2302 llvm::sort(MergedFeatures);
2303 FuncAttr.addAttribute(
"target-features", llvm::join(MergedFeatures,
","));
2310 bool WillInternalize) {
2312 llvm::AttrBuilder FuncAttrs(F.getContext());
2315 if (!TargetOpts.
CPU.empty())
2316 FuncAttrs.addAttribute(
"target-cpu", TargetOpts.
CPU);
2317 if (!TargetOpts.
TuneCPU.empty())
2318 FuncAttrs.addAttribute(
"tune-cpu", TargetOpts.
TuneCPU);
2321 CodeGenOpts, LangOpts,
2324 if (!WillInternalize && F.isInterposable()) {
2329 F.addFnAttrs(FuncAttrs);
2333 llvm::AttributeMask AttrsToRemove;
2337 llvm::DenormalFPEnv MergedFPEnv =
2338 OptsFPEnv.mergeCalleeMode(F.getDenormalFPEnv());
2340 if (MergedFPEnv == llvm::DenormalFPEnv::getDefault()) {
2341 AttrsToRemove.addAttribute(llvm::Attribute::DenormalFPEnv);
2344 FuncAttrs.addDenormalFPEnvAttr(MergedFPEnv);
2347 F.removeFnAttrs(AttrsToRemove);
2351 F.addFnAttrs(FuncAttrs);
2354void CodeGenModule::getTrivialDefaultFunctionAttributes(
2355 StringRef Name,
bool HasOptnone,
bool AttrOnCallSite,
2356 llvm::AttrBuilder &FuncAttrs) {
2358 getLangOpts(), AttrOnCallSite,
2362void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2364 bool AttrOnCallSite,
2365 llvm::AttrBuilder &FuncAttrs) {
2369 if (!AttrOnCallSite)
2375 if (!AttrOnCallSite)
2380 llvm::AttrBuilder &attrs) {
2381 getDefaultFunctionAttributes(
"",
false,
2383 GetCPUAndFeaturesAttributes(
GlobalDecl(), attrs);
2388 const NoBuiltinAttr *NBA =
nullptr) {
2389 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2391 AttributeName +=
"no-builtin-";
2392 AttributeName += BuiltinName;
2393 FuncAttrs.addAttribute(AttributeName);
2397 if (LangOpts.NoBuiltin) {
2399 FuncAttrs.addAttribute(
"no-builtins");
2413 if (llvm::is_contained(NBA->builtinNames(),
"*")) {
2414 FuncAttrs.addAttribute(
"no-builtins");
2419 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2423 const llvm::DataLayout &DL,
const ABIArgInfo &AI,
2424 bool CheckCoerce =
true) {
2431 if (!DL.typeSizeEqualsStoreSize(Ty))
2438 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2439 DL.getTypeSizeInBits(Ty)))
2463 if (
const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2474 unsigned NumRequiredArgs,
unsigned ArgNo) {
2475 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2480 if (ArgNo >= NumRequiredArgs)
2484 if (ArgNo < FD->getNumParams()) {
2485 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2486 if (Param && Param->hasAttr<MaybeUndefAttr>())
2503 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2506 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2508 llvm::all_of(ST->elements(),
2509 llvm::AttributeFuncs::isNoFPClassCompatibleType);
2517 llvm::FPClassTest Mask = llvm::fcNone;
2518 if (LangOpts.NoHonorInfs)
2519 Mask |= llvm::fcInf;
2520 if (LangOpts.NoHonorNaNs)
2521 Mask |= llvm::fcNan;
2527 llvm::AttributeList &Attrs) {
2528 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2529 Attrs = Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Memory);
2530 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
2556 llvm::AttributeList &AttrList,
2558 bool AttrOnCallSite,
bool IsThunk) {
2566 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2568 FuncAttrs.addAttribute(
"cmse_nonsecure_call");
2579 bool HasOptnone =
false;
2581 const NoBuiltinAttr *NBA =
nullptr;
2585 std::optional<llvm::Attribute::AttrKind> MemAttrForPtrArgs;
2586 bool AddedPotentialArgAccess =
false;
2587 auto AddPotentialArgAccess = [&]() {
2588 AddedPotentialArgAccess =
true;
2589 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2591 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2592 llvm::MemoryEffects::argMemOnly());
2599 if (TargetDecl->
hasAttr<ReturnsTwiceAttr>())
2600 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2601 if (TargetDecl->
hasAttr<NoThrowAttr>())
2602 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2603 if (TargetDecl->
hasAttr<NoReturnAttr>())
2604 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2605 if (TargetDecl->
hasAttr<ColdAttr>())
2606 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2607 if (TargetDecl->
hasAttr<HotAttr>())
2608 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2609 if (TargetDecl->
hasAttr<NoDuplicateAttr>())
2610 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2611 if (TargetDecl->
hasAttr<ConvergentAttr>())
2612 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2614 if (
const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2617 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2619 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2621 (Kind == OO_New || Kind == OO_Array_New))
2622 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2625 const bool IsVirtualCall = MD && MD->
isVirtual();
2628 if (!(AttrOnCallSite && IsVirtualCall)) {
2629 if (Fn->isNoReturn())
2630 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2631 NBA = Fn->getAttr<NoBuiltinAttr>();
2638 if (AttrOnCallSite && TargetDecl->
hasAttr<NoMergeAttr>())
2639 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2643 if (TargetDecl->
hasAttr<ConstAttr>()) {
2644 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2645 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2648 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2649 MemAttrForPtrArgs = llvm::Attribute::ReadNone;
2650 }
else if (TargetDecl->
hasAttr<PureAttr>()) {
2651 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2652 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2654 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2655 MemAttrForPtrArgs = llvm::Attribute::ReadOnly;
2656 }
else if (TargetDecl->
hasAttr<NoAliasAttr>()) {
2657 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2658 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2660 if (
const auto *RA = TargetDecl->
getAttr<RestrictAttr>();
2661 RA && RA->getDeallocator() ==
nullptr)
2662 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2663 if (TargetDecl->
hasAttr<ReturnsNonNullAttr>() &&
2664 !CodeGenOpts.NullPointerIsValid)
2665 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2666 if (TargetDecl->
hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2667 FuncAttrs.addAttribute(
"no_caller_saved_registers");
2668 if (TargetDecl->
hasAttr<AnyX86NoCfCheckAttr>())
2669 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2670 if (TargetDecl->
hasAttr<LeafAttr>())
2671 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2672 if (TargetDecl->
hasAttr<BPFFastCallAttr>())
2673 FuncAttrs.addAttribute(
"bpf_fastcall");
2675 HasOptnone = TargetDecl->
hasAttr<OptimizeNoneAttr>();
2676 if (
auto *AllocSize = TargetDecl->
getAttr<AllocSizeAttr>()) {
2677 std::optional<unsigned> NumElemsParam;
2678 if (AllocSize->getNumElemsParam().isValid())
2679 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2680 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2690 FuncAttrs.addAttribute(
"uniform-work-group-size");
2692 if (TargetDecl->
hasAttr<ArmLocallyStreamingAttr>())
2693 FuncAttrs.addAttribute(
"aarch64_pstate_sm_body");
2695 if (
auto *ModularFormat = TargetDecl->
getAttr<ModularFormatAttr>()) {
2696 FormatAttr *Format = TargetDecl->
getAttr<FormatAttr>();
2697 StringRef
Type = Format->getType()->getName();
2698 std::string FormatIdx = std::to_string(Format->getFormatIdx());
2699 std::string FirstArg = std::to_string(Format->getFirstArg());
2701 Type, FormatIdx, FirstArg,
2702 ModularFormat->getModularImplFn()->getName(),
2703 ModularFormat->getImplName()};
2704 llvm::append_range(Args, ModularFormat->aspects());
2705 FuncAttrs.addAttribute(
"modular-format", llvm::join(Args,
","));
2718 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2723 if (TargetDecl->
hasAttr<NoSpeculativeLoadHardeningAttr>())
2724 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2725 if (TargetDecl->
hasAttr<SpeculativeLoadHardeningAttr>())
2726 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2727 if (TargetDecl->
hasAttr<NoSplitStackAttr>())
2728 FuncAttrs.removeAttribute(
"split-stack");
2729 if (TargetDecl->
hasAttr<ZeroCallUsedRegsAttr>()) {
2732 TargetDecl->
getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2733 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2734 FuncAttrs.addAttribute(
2735 "zero-call-used-regs",
2736 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2743 if (CodeGenOpts.NoPLT) {
2744 if (
auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2745 if (!Fn->isDefined() && !AttrOnCallSite) {
2746 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2751 if (TargetDecl->
hasAttr<NoConvergentAttr>())
2752 FuncAttrs.removeAttribute(llvm::Attribute::Convergent);
2757 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2758 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2759 if (!FD->isExternallyVisible())
2760 FuncAttrs.addAttribute(
"sample-profile-suffix-elision-policy",
2767 if (!AttrOnCallSite) {
2768 if (TargetDecl && TargetDecl->
hasAttr<CmseNSEntryAttr>())
2769 FuncAttrs.addAttribute(
"cmse_nonsecure_entry");
2772 auto shouldDisableTailCalls = [&] {
2774 if (CodeGenOpts.DisableTailCalls)
2780 if (TargetDecl->
hasAttr<DisableTailCallsAttr>() ||
2781 TargetDecl->
hasAttr<AnyX86InterruptAttr>())
2784 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2785 if (
const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2786 if (!BD->doesNotEscape())
2792 if (shouldDisableTailCalls())
2793 FuncAttrs.addAttribute(
"disable-tail-calls",
"true");
2798 static const llvm::StringSet<> ReturnsTwiceFn{
2799 "_setjmpex",
"setjmp",
"_setjmp",
"vfork",
2800 "sigsetjmp",
"__sigsetjmp",
"savectx",
"getcontext"};
2801 if (ReturnsTwiceFn.contains(Name))
2802 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2806 GetCPUAndFeaturesAttributes(CalleeInfo.
getCalleeDecl(), FuncAttrs);
2809 if (!MSHotPatchFunctions.empty()) {
2810 bool IsHotPatched = llvm::binary_search(MSHotPatchFunctions, Name);
2812 FuncAttrs.addAttribute(
"marked_for_windows_hot_patching");
2817 if (CodeGenOpts.isLoaderReplaceableFunctionName(Name))
2818 FuncAttrs.addAttribute(
"loader-replaceable");
2821 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI);
2828 if (CodeGenOpts.EnableNoundefAttrs &&
2832 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2838 RetAttrs.addAttribute(llvm::Attribute::SExt);
2840 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2842 RetAttrs.addAttribute(llvm::Attribute::NoExt);
2847 RetAttrs.addAttribute(llvm::Attribute::InReg);
2859 AddPotentialArgAccess();
2868 llvm_unreachable(
"Invalid ABI kind for return argument");
2876 RetAttrs.addDereferenceableAttr(
2878 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2879 !CodeGenOpts.NullPointerIsValid)
2880 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2882 llvm::Align Alignment =
2884 RetAttrs.addAlignmentAttr(Alignment);
2889 bool hasUsedSRet =
false;
2893 if (IRFunctionArgs.hasSRetArg()) {
2895 SRETAttrs.addStructRetAttr(
getTypes().ConvertTypeForMem(RetTy));
2896 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2897 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2900 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2902 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2907 if (IRFunctionArgs.hasInallocaArg()) {
2910 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2920 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2922 assert(IRArgs.second == 1 &&
"Expected only a single `this` pointer.");
2928 if (!CodeGenOpts.NullPointerIsValid &&
2930 Attrs.addAttribute(llvm::Attribute::NonNull);
2937 Attrs.addDereferenceableOrNullAttr(
2943 llvm::Align Alignment =
2947 Attrs.addAlignmentAttr(Alignment);
2949 const auto *DD = dyn_cast_if_present<CXXDestructorDecl>(
2963 CodeGenOpts.StrictLifetimes) {
2965 dyn_cast<CXXRecordDecl>(DD->getDeclContext());
2969 Attrs.addDeadOnReturnAttr(llvm::DeadOnReturnInfo(
2970 Context.getASTRecordLayout(ClassDecl).getDataSize().getQuantity()));
2973 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(
getLLVMContext(), Attrs);
2978 I != E; ++I, ++ArgNo) {
2984 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2986 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2989 .addAttribute(llvm::Attribute::InReg));
2994 if (CodeGenOpts.EnableNoundefAttrs &&
2996 Attrs.addAttribute(llvm::Attribute::NoUndef);
3005 Attrs.addAttribute(llvm::Attribute::SExt);
3007 Attrs.addAttribute(llvm::Attribute::ZExt);
3009 Attrs.addAttribute(llvm::Attribute::NoExt);
3014 Attrs.addAttribute(llvm::Attribute::Nest);
3016 Attrs.addAttribute(llvm::Attribute::InReg);
3017 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.
getDirectAlign()));
3024 Attrs.addAttribute(llvm::Attribute::InReg);
3036 Attrs.addByValAttr(
getTypes().ConvertTypeForMem(ParamType));
3044 Attrs.addDeadOnReturnAttr(llvm::DeadOnReturnInfo());
3049 if (CodeGenOpts.PassByValueIsNoAlias &&
Decl &&
3050 Decl->getArgPassingRestrictions() ==
3054 Attrs.addAttribute(llvm::Attribute::NoAlias);
3079 AddPotentialArgAccess();
3084 Attrs.addByRefAttr(
getTypes().ConvertTypeForMem(ParamType));
3095 AddPotentialArgAccess();
3103 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
3104 !CodeGenOpts.NullPointerIsValid)
3105 Attrs.addAttribute(llvm::Attribute::NonNull);
3107 llvm::Align Alignment =
3109 Attrs.addAlignmentAttr(Alignment);
3118 DeviceKernelAttr::isOpenCLSpelling(
3119 TargetDecl->
getAttr<DeviceKernelAttr>()) &&
3123 llvm::Align Alignment =
3125 Attrs.addAlignmentAttr(Alignment);
3132 Attrs.addAttribute(llvm::Attribute::NoAlias);
3141 Attrs.addStructRetAttr(
getTypes().ConvertTypeForMem(ParamType));
3146 Attrs.addAttribute(llvm::Attribute::NoAlias);
3150 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
3151 auto info =
getContext().getTypeInfoInChars(PTy);
3152 Attrs.addDereferenceableAttr(info.Width.getQuantity());
3153 Attrs.addAlignmentAttr(info.Align.getAsAlign());
3159 Attrs.addAttribute(llvm::Attribute::SwiftError);
3163 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
3167 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
3172 Attrs.addCapturesAttr(llvm::CaptureInfo::none());
3174 if (Attrs.hasAttributes()) {
3175 unsigned FirstIRArg, NumIRArgs;
3176 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3177 for (
unsigned i = 0; i < NumIRArgs; i++)
3178 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
3187 AddPotentialArgAccess();
3190 if (AddedPotentialArgAccess && MemAttrForPtrArgs) {
3194 I != E; ++I, ++ArgNo) {
3195 if (I->info.isDirect() || I->info.isExpand() ||
3196 I->info.isCoerceAndExpand()) {
3197 unsigned FirstIRArg, NumIRArgs;
3198 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3199 for (
unsigned i = FirstIRArg; i < FirstIRArg + NumIRArgs; ++i) {
3206 if (i < FunctionType->getNumParams() &&
3216 AttrList = llvm::AttributeList::get(
3225 llvm::Value *value) {
3226 llvm::Type *varType = CGF.
ConvertType(var->getType());
3230 if (value->getType() == varType)
3233 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) &&
3234 "unexpected promotion type");
3237 return CGF.
Builder.CreateTrunc(value, varType,
"arg.unpromote");
3239 return CGF.
Builder.CreateFPCast(value, varType,
"arg.unpromote");
3245 QualType ArgType,
unsigned ArgNo) {
3253 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
3257 if (
auto ParmNNAttr = PVD->
getAttr<NonNullAttr>())
3264 if (NNAttr->isNonNull(ArgNo))
3271struct CopyBackSwiftError final : EHScopeStack::Cleanup {
3274 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(
arg) {}
3275 void Emit(CodeGenFunction &CGF, Flags flags)
override {
3294 if (FD->hasImplicitReturnZero()) {
3295 QualType RetTy = FD->getReturnType().getUnqualifiedType();
3296 llvm::Type *LLVMTy =
CGM.getTypes().ConvertType(RetTy);
3297 llvm::Constant *
Zero = llvm::Constant::getNullValue(LLVMTy);
3305 ClangToLLVMArgMapping IRFunctionArgs(
CGM.getContext(), FI);
3306 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
3311 if (IRFunctionArgs.hasInallocaArg())
3312 ArgStruct =
Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
3316 if (IRFunctionArgs.hasSRetArg()) {
3317 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
3318 AI->setName(
"agg.result");
3319 AI->addAttr(llvm::Attribute::NoAlias);
3326 ArgVals.reserve(Args.size());
3332 assert(FI.
arg_size() == Args.size() &&
3333 "Mismatch between function signature & arguments.");
3336 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e;
3337 ++i, ++info_it, ++ArgNo) {
3350 unsigned FirstIRArg, NumIRArgs;
3351 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3355 assert(NumIRArgs == 0);
3368 assert(NumIRArgs == 1);
3391 llvm::ConstantInt::get(
IntPtrTy, Size.getQuantity()));
3392 ParamAddr = AlignedTemp;
3409 auto AI = Fn->getArg(FirstIRArg);
3417 assert(NumIRArgs == 1);
3419 if (
const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
3422 PVD->getFunctionScopeIndex()) &&
3423 !
CGM.getCodeGenOpts().NullPointerIsValid)
3424 AI->addAttr(llvm::Attribute::NonNull);
3426 QualType OTy = PVD->getOriginalType();
3427 if (
const auto *ArrTy =
getContext().getAsConstantArrayType(OTy)) {
3433 QualType ETy = ArrTy->getElementType();
3434 llvm::Align Alignment =
3435 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3437 .addAlignmentAttr(Alignment));
3438 uint64_t ArrSize = ArrTy->getZExtSize();
3442 Attrs.addDereferenceableAttr(
3443 getContext().getTypeSizeInChars(ETy).getQuantity() *
3445 AI->addAttrs(Attrs);
3446 }
else if (
getContext().getTargetInfo().getNullPointerValue(
3448 !
CGM.getCodeGenOpts().NullPointerIsValid) {
3449 AI->addAttr(llvm::Attribute::NonNull);
3452 }
else if (
const auto *ArrTy =
3458 QualType ETy = ArrTy->getElementType();
3459 llvm::Align Alignment =
3460 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3462 .addAlignmentAttr(Alignment));
3463 if (!
getTypes().getTargetAddressSpace(ETy) &&
3464 !
CGM.getCodeGenOpts().NullPointerIsValid)
3465 AI->addAttr(llvm::Attribute::NonNull);
3470 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3473 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3474 if (AVAttr && !
SanOpts.has(SanitizerKind::Alignment)) {
3478 llvm::ConstantInt *AlignmentCI =
3480 uint64_t AlignmentInt =
3481 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3482 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3483 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3485 .addAlignmentAttr(llvm::Align(AlignmentInt)));
3492 AI->addAttr(llvm::Attribute::NoAlias);
3500 assert(NumIRArgs == 1);
3504 llvm::Value *
V = AI;
3512 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
3513 llvm::Value *incomingErrorValue =
Builder.CreateLoad(arg);
3514 Builder.CreateStore(incomingErrorValue, temp);
3535 if (
V->getType() != LTy)
3546 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(
ConvertType(Ty))) {
3547 llvm::Value *ArgVal = Fn->getArg(FirstIRArg);
3548 if (
auto *VecTyFrom =
3549 dyn_cast<llvm::ScalableVectorType>(ArgVal->getType())) {
3551 *
this, VecTyTo, VecTyFrom, ArgVal, Arg->
getName());
3553 assert(NumIRArgs == 1);
3560 llvm::StructType *STy =
3571 STy->getNumElements() > 1) {
3572 llvm::TypeSize StructSize =
CGM.getDataLayout().getTypeAllocSize(STy);
3573 llvm::TypeSize PtrElementSize =
3575 if (StructSize.isScalable()) {
3576 assert(STy->containsHomogeneousScalableVectorTypes() &&
3577 "ABI only supports structure with homogeneous scalable vector "
3579 assert(StructSize == PtrElementSize &&
3580 "Only allow non-fractional movement of structure with"
3581 "homogeneous scalable vector type");
3582 assert(STy->getNumElements() == NumIRArgs);
3584 llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
3585 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3586 auto *AI = Fn->getArg(FirstIRArg + i);
3587 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3589 Builder.CreateInsertValue(LoadedStructValue, AI, i);
3592 Builder.CreateStore(LoadedStructValue, Ptr);
3594 uint64_t SrcSize = StructSize.getFixedValue();
3595 uint64_t DstSize = PtrElementSize.getFixedValue();
3598 if (SrcSize <= DstSize) {
3605 assert(STy->getNumElements() == NumIRArgs);
3606 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3607 auto AI = Fn->getArg(FirstIRArg + i);
3608 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3610 Builder.CreateStore(AI, EltPtr);
3613 if (SrcSize > DstSize) {
3614 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
3626 assert(NumIRArgs == 1);
3627 auto AI = Fn->getArg(FirstIRArg);
3628 AI->setName(Arg->
getName() +
".coerce");
3631 llvm::TypeSize::getFixed(
3632 getContext().getTypeSizeInChars(Ty).getQuantity() -
3657 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
3661 unsigned argIndex = FirstIRArg;
3662 unsigned unpaddedIndex = 0;
3663 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3664 llvm::Type *eltType = coercionType->getElementType(i);
3668 auto eltAddr =
Builder.CreateStructGEP(alloca, i);
3669 llvm::Value *elt = Fn->getArg(argIndex++);
3671 auto paramType = unpaddedStruct
3672 ? unpaddedStruct->getElementType(unpaddedIndex++)
3673 : unpaddedCoercionType;
3675 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(eltType)) {
3676 if (
auto *VecTyFrom = dyn_cast<llvm::ScalableVectorType>(paramType)) {
3679 *
this, VecTyTo, VecTyFrom, elt, elt->getName());
3680 assert(Extracted &&
"Unexpected scalable to fixed vector coercion");
3683 Builder.CreateStore(elt, eltAddr);
3685 assert(argIndex == FirstIRArg + NumIRArgs);
3697 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3698 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3699 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3700 for (
unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3701 auto AI = Fn->getArg(FirstIRArg + i);
3702 AI->setName(Arg->
getName() +
"." + Twine(i));
3708 auto *AI = Fn->getArg(FirstIRArg);
3709 AI->setName(Arg->
getName() +
".target_coerce");
3713 CGM.getABIInfo().createCoercedStore(AI, Ptr, ArgI,
false, *
this);
3727 assert(NumIRArgs == 0);
3739 if (
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3740 for (
int I = Args.size() - 1; I >= 0; --I)
3743 for (
unsigned I = 0, E = Args.size(); I != E; ++I)
3749 while (insn->use_empty()) {
3750 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3756 bitcast->eraseFromParent();
3762 llvm::Value *result) {
3764 llvm::BasicBlock *BB = CGF.
Builder.GetInsertBlock();
3767 if (&BB->back() != result)
3770 llvm::Type *resultType = result->getType();
3779 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3785 if (generator->getNextNode() != bitcast)
3788 InstsToKill.push_back(bitcast);
3795 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3799 bool doRetainAutorelease;
3802 doRetainAutorelease =
true;
3803 }
else if (call->getCalledOperand() ==
3805 doRetainAutorelease =
false;
3813 llvm::Instruction *prev = call->getPrevNode();
3816 prev = prev->getPrevNode();
3822 InstsToKill.push_back(prev);
3828 result = call->getArgOperand(0);
3829 InstsToKill.push_back(call);
3833 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3834 if (!bitcast->hasOneUse())
3836 InstsToKill.push_back(bitcast);
3837 result = bitcast->getOperand(0);
3841 for (
auto *I : InstsToKill)
3842 I->eraseFromParent();
3845 if (doRetainAutorelease)
3849 return CGF.
Builder.CreateBitCast(result, resultType);
3854 llvm::Value *result) {
3857 dyn_cast_or_null<ObjCMethodDecl>(CGF.
CurCodeDecl);
3866 llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
3867 if (!retainCall || retainCall->getCalledOperand() !=
3872 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3873 llvm::LoadInst *load =
3874 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3875 if (!load || load->isAtomic() || load->isVolatile() ||
3882 llvm::Type *resultType = result->getType();
3884 assert(retainCall->use_empty());
3885 retainCall->eraseFromParent();
3888 return CGF.
Builder.CreateBitCast(load, resultType);
3895 llvm::Value *result) {
3918 auto GetStoreIfValid = [&CGF,
3919 ReturnValuePtr](llvm::User *
U) -> llvm::StoreInst * {
3920 auto *SI = dyn_cast<llvm::StoreInst>(
U);
3921 if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
3927 assert(!SI->isAtomic() &&
3935 if (!ReturnValuePtr->hasOneUse()) {
3936 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3942 const llvm::Instruction *LoadIntoFakeUse =
nullptr;
3943 for (llvm::Instruction &I : llvm::reverse(*IP)) {
3947 if (LoadIntoFakeUse == &I)
3951 if (
auto *II = dyn_cast<llvm::IntrinsicInst>(&I)) {
3952 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3955 if (II->getIntrinsicID() == llvm::Intrinsic::fake_use) {
3956 LoadIntoFakeUse = dyn_cast<llvm::Instruction>(II->getArgOperand(0));
3960 return GetStoreIfValid(&I);
3965 llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
3971 llvm::BasicBlock *StoreBB = store->getParent();
3972 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3974 while (IP != StoreBB) {
3975 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3991 int BitWidth,
int CharWidth) {
3992 assert(CharWidth <= 64);
3993 assert(
static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3996 if (BitOffset >= CharWidth) {
3997 Pos += BitOffset / CharWidth;
3998 BitOffset = BitOffset % CharWidth;
4001 const uint64_t
Used = (uint64_t(1) << CharWidth) - 1;
4002 if (BitOffset + BitWidth >= CharWidth) {
4003 Bits[Pos++] |= (
Used << BitOffset) &
Used;
4004 BitWidth -= CharWidth - BitOffset;
4008 while (BitWidth >= CharWidth) {
4010 BitWidth -= CharWidth;
4014 Bits[Pos++] |= (
Used >> (CharWidth - BitWidth)) << BitOffset;
4022 int StorageSize,
int BitOffset,
int BitWidth,
4023 int CharWidth,
bool BigEndian) {
4026 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
4029 std::reverse(TmpBits.begin(), TmpBits.end());
4031 for (uint64_t
V : TmpBits)
4032 Bits[StorageOffset++] |=
V;
4035static void setUsedBits(CodeGenModule &, QualType,
int,
4036 SmallVectorImpl<uint64_t> &);
4047 const RecordDecl *RD = RTy->getDecl()->getDefinition();
4078 QualType ETy = Context.getBaseElementType(ATy);
4079 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
4083 for (
int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
4084 auto Src = TmpBits.begin();
4085 auto Dst = Bits.begin() + Offset + I * Size;
4086 for (
int J = 0; J < Size; ++J)
4099 if (
const auto *ATy = Context.getAsConstantArrayType(QTy))
4102 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
4106 std::fill_n(Bits.begin() + Offset, Size,
4107 (uint64_t(1) << Context.getCharWidth()) - 1);
4111 int Pos,
int Size,
int CharWidth,
4116 for (
auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
4118 Mask = (Mask << CharWidth) | *P;
4120 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
4122 Mask = (Mask << CharWidth) | *--P;
4131 llvm::IntegerType *ITy,
4133 assert(Src->getType() == ITy);
4134 assert(ITy->getScalarSizeInBits() <= 64);
4136 const llvm::DataLayout &DataLayout =
CGM.getDataLayout();
4137 int Size = DataLayout.getTypeStoreSize(ITy);
4141 int CharWidth =
CGM.getContext().getCharWidth();
4145 return Builder.CreateAnd(Src, Mask,
"cmse.clear");
4151 llvm::ArrayType *ATy,
4153 const llvm::DataLayout &DataLayout =
CGM.getDataLayout();
4154 int Size = DataLayout.getTypeStoreSize(ATy);
4159 int CharWidth =
CGM.getContext().getCharWidth();
4161 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
4163 llvm::Value *R = llvm::PoisonValue::get(ATy);
4164 for (
int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
4166 DataLayout.isBigEndian());
4167 MaskIndex += CharsPerElt;
4168 llvm::Value *T0 =
Builder.CreateExtractValue(Src, I);
4169 llvm::Value *T1 =
Builder.CreateAnd(T0, Mask,
"cmse.clear");
4170 R =
Builder.CreateInsertValue(R, T1, I);
4178 uint64_t RetKeyInstructionsSourceAtom) {
4193 auto *I =
Builder.CreateRetVoid();
4194 if (RetKeyInstructionsSourceAtom)
4201 llvm::DebugLoc RetDbgLoc;
4202 llvm::Value *RV =
nullptr;
4212 llvm::Function::arg_iterator EI =
CurFn->arg_end();
4214 llvm::Value *ArgStruct = &*EI;
4215 llvm::Value *SRet =
Builder.CreateStructGEP(
4224 auto AI =
CurFn->arg_begin();
4242 CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
4269 RetDbgLoc = SI->getDebugLoc();
4271 RV = SI->getValueOperand();
4272 SI->eraseFromParent();
4295 if (
auto *FD = dyn_cast<FunctionDecl>(
CurCodeDecl))
4296 RT = FD->getReturnType();
4297 else if (
auto *MD = dyn_cast<ObjCMethodDecl>(
CurCodeDecl))
4298 RT = MD->getReturnType();
4300 RT =
BlockInfo->BlockExpression->getFunctionType()->getReturnType();
4302 llvm_unreachable(
"Unexpected function/method type");
4318 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
4323 unsigned unpaddedIndex = 0;
4324 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4325 auto coercedEltType = coercionType->getElementType(i);
4329 auto eltAddr =
Builder.CreateStructGEP(addr, i);
4332 unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
4333 : unpaddedCoercionType,
4335 results.push_back(elt);
4339 if (results.size() == 1) {
4347 RV = llvm::PoisonValue::get(returnType);
4348 for (
unsigned i = 0, e = results.size(); i != e; ++i) {
4349 RV =
Builder.CreateInsertValue(RV, results[i], i);
4356 RV =
CGM.getABIInfo().createCoercedLoad(
V, RetAI, *
this);
4361 llvm_unreachable(
"Invalid ABI kind for return argument");
4364 llvm::Instruction *Ret;
4370 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
4377 Ret =
Builder.CreateRetVoid();
4381 Ret->setDebugLoc(std::move(RetDbgLoc));
4383 llvm::Value *Backup = RV ? Ret->getOperand(0) :
nullptr;
4384 if (RetKeyInstructionsSourceAtom)
4400 ReturnsNonNullAttr *RetNNAttr =
nullptr;
4401 if (
SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
4402 RetNNAttr =
CurCodeDecl->getAttr<ReturnsNonNullAttr>();
4404 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
4412 assert(!requiresReturnValueNullabilityCheck() &&
4413 "Cannot check nullability and the nonnull attribute");
4414 AttrLoc = RetNNAttr->getLocation();
4415 CheckKind = SanitizerKind::SO_ReturnsNonnullAttribute;
4416 Handler = SanitizerHandler::NonnullReturn;
4418 if (
auto *DD = dyn_cast<DeclaratorDecl>(
CurCodeDecl))
4419 if (
auto *TSI = DD->getTypeSourceInfo())
4421 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
4422 CheckKind = SanitizerKind::SO_NullabilityReturn;
4423 Handler = SanitizerHandler::NullabilityReturn;
4432 llvm::Value *SLocPtr =
Builder.CreateLoad(ReturnLocation,
"return.sloc.load");
4433 llvm::Value *CanNullCheck =
Builder.CreateIsNotNull(SLocPtr);
4434 if (requiresReturnValueNullabilityCheck())
4436 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
4437 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
4443 llvm::Value *DynamicData[] = {SLocPtr};
4444 EmitCheck(std::make_pair(
Cond, CheckKind), Handler, StaticData, DynamicData);
4463 llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.
getLLVMContext());
4464 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
4489 if (
type->isReferenceType()) {
4498 param->
hasAttr<NSConsumedAttr>() &&
type->isObjCRetainableType()) {
4499 llvm::Value *ptr =
Builder.CreateLoad(local);
4502 Builder.CreateStore(null, local);
4513 type->castAsRecordDecl()->isParamDestroyedInCallee() &&
4518 "cleanup for callee-destructed param not recorded");
4520 llvm::Instruction *isActive =
Builder.CreateUnreachable();
4526 return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
4536 const LValue &srcLV = writeback.
Source;
4537 Address srcAddr = srcLV.getAddress();
4539 "shouldn't have writeback for provably null argument");
4547 llvm::BasicBlock *contBB =
nullptr;
4553 if (!provablyNonNull) {
4558 CGF.
Builder.CreateCondBr(isNull, contBB, writebackBB);
4567 "icr.writeback-cast");
4576 if (writeback.
ToUse) {
4601 if (!provablyNonNull)
4610 for (
const auto &I : llvm::reverse(Cleanups)) {
4612 I.IsActiveIP->eraseFromParent();
4618 if (uop->getOpcode() == UO_AddrOf)
4619 return uop->getSubExpr();
4644 Address srcAddr = srcLV.getAddress();
4649 llvm::PointerType *destType =
4651 llvm::Type *destElemType =
4678 llvm::BasicBlock *contBB =
nullptr;
4679 llvm::BasicBlock *originBB =
nullptr;
4682 llvm::Value *finalArgument;
4686 if (provablyNonNull) {
4691 finalArgument = CGF.
Builder.CreateSelect(
4692 isNull, llvm::ConstantPointerNull::get(destType),
4698 originBB = CGF.
Builder.GetInsertBlock();
4701 CGF.
Builder.CreateCondBr(isNull, contBB, copyBB);
4703 condEval.
begin(CGF);
4707 llvm::Value *valueToUse =
nullptr;
4715 src = CGF.
Builder.CreateBitCast(src, destElemType,
"icr.cast");
4732 if (shouldCopy && !provablyNonNull) {
4733 llvm::BasicBlock *copyBB = CGF.
Builder.GetInsertBlock();
4738 llvm::PHINode *phiToUse =
4739 CGF.
Builder.CreatePHI(valueToUse->getType(), 2,
"icr.to-use");
4740 phiToUse->addIncoming(valueToUse, copyBB);
4741 phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),
4743 valueToUse = phiToUse;
4757 StackBase = CGF.
Builder.CreateStackSave(
"inalloca.save");
4763 CGF.
Builder.CreateStackRestore(StackBase);
4770 if (!AC.
getDecl() || !(
SanOpts.has(SanitizerKind::NonnullAttribute) ||
4771 SanOpts.has(SanitizerKind::NullabilityArg)))
4776 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4779 const NonNullAttr *NNAttr =
nullptr;
4780 if (
SanOpts.has(SanitizerKind::NonnullAttribute))
4783 bool CanCheckNullability =
false;
4784 if (
SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
4785 !PVD->getType()->isRecordType()) {
4786 auto Nullability = PVD->getType()->getNullability();
4787 CanCheckNullability = Nullability &&
4789 PVD->getTypeSourceInfo();
4792 if (!NNAttr && !CanCheckNullability)
4799 AttrLoc = NNAttr->getLocation();
4800 CheckKind = SanitizerKind::SO_NonnullAttribute;
4801 Handler = SanitizerHandler::NonnullArg;
4803 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4804 CheckKind = SanitizerKind::SO_NullabilityArg;
4805 Handler = SanitizerHandler::NullabilityArg;
4810 llvm::Constant *StaticData[] = {
4813 llvm::ConstantInt::get(
Int32Ty, ArgNo + 1),
4815 EmitCheck(std::make_pair(
Cond, CheckKind), Handler, StaticData, {});
4821 if (!AC.
getDecl() || !(
SanOpts.has(SanitizerKind::NonnullAttribute) ||
4822 SanOpts.has(SanitizerKind::NullabilityArg)))
4841 return llvm::any_of(ArgTypes, [&](
QualType Ty) {
4852 return classDecl->getTypeParamListAsWritten();
4856 return catDecl->getTypeParamList();
4866 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4870 assert((ParamsToSkip == 0 ||
Prototype.P) &&
4871 "Can't skip parameters if type info is not provided");
4881 bool IsVariadic =
false;
4883 const auto *MD = dyn_cast<const ObjCMethodDecl *>(
Prototype.P);
4885 IsVariadic = MD->isVariadic();
4887 MD,
CGM.getTarget().getTriple().isOSWindows());
4888 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4889 MD->param_type_end());
4892 IsVariadic = FPT->isVariadic();
4893 ExplicitCC = FPT->getExtInfo().getCC();
4894 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4895 FPT->param_type_end());
4903 assert(Arg != ArgRange.end() &&
"Running over edge of argument list!");
4905 QualType ArgTy = (*Arg)->getType();
4906 if (
const auto *OBT = ParamTy->
getAs<OverflowBehaviorType>())
4907 ParamTy = OBT->getUnderlyingType();
4908 if (
const auto *OBT = ArgTy->
getAs<OverflowBehaviorType>())
4909 ArgTy = OBT->getUnderlyingType();
4912 getContext().getCanonicalType(ParamTy).getTypePtr() ==
4913 getContext().getCanonicalType(ArgTy).getTypePtr()) &&
4914 "type mismatch in call argument!");
4920 assert((Arg == ArgRange.end() || IsVariadic) &&
4921 "Extra arguments in non-variadic function!");
4926 for (
auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4927 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4928 assert((
int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4936 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4940 auto MaybeEmitImplicitObjectSize = [&](
unsigned I,
const Expr *Arg,
4949 auto SizeTy = Context.getSizeType();
4951 assert(EmittedArg.getScalarVal() &&
"We emitted nothing for the arg?");
4952 llvm::Value *
V = evaluateOrEmitBuiltinObjectSize(
4953 Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic());
4958 std::swap(Args.back(), *(&Args.back() - 1));
4964 "inalloca only supported on x86");
4969 size_t CallArgsStart = Args.size();
4970 for (
unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4971 unsigned Idx = LeftToRight ? I : E - I - 1;
4973 unsigned InitialArgSize = Args.size();
4977 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4981 "Argument and parameter types don't match");
4985 assert(InitialArgSize + 1 == Args.size() &&
4986 "The code below depends on only adding one arg per EmitCallArg");
4987 (void)InitialArgSize;
4990 if (!Args.back().hasLValue()) {
4991 RValue RVArg = Args.back().getKnownRValue();
4993 ParamsToSkip + Idx);
4997 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
5004 std::reverse(Args.begin() + CallArgsStart, Args.end());
5013struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
5046 if (!HasLV &&
RV.isScalar())
5048 else if (!HasLV &&
RV.isComplex())
5051 auto Addr = HasLV ?
LV.getAddress() :
RV.getAggregateAddress();
5055 HasLV ?
LV.isVolatileQualified()
5056 :
RV.isVolatileQualified());
5068 std::optional<DisableDebugLocationUpdates> Dis;
5072 dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
5086 "reference binding to unmaterialized r-value!");
5098 if (
type->isRecordType() &&
5099 type->castAsRecordDecl()->isParamDestroyedInCallee()) {
5106 bool DestroyedInCallee =
true, NeedsCleanup =
true;
5107 if (
const auto *RD =
type->getAsCXXRecordDecl())
5108 DestroyedInCallee = RD->hasNonTrivialDestructor();
5110 NeedsCleanup =
type.isDestructedType();
5112 if (DestroyedInCallee)
5119 if (DestroyedInCallee && NeedsCleanup) {
5126 llvm::Instruction *IsActive =
5135 !
type->isArrayParameterType() && !
type.isNonTrivialToPrimitiveCopy()) {
5145QualType CodeGenFunction::getVarArgType(
const Expr *Arg) {
5149 if (!getTarget().
getTriple().isOSWindows())
5153 getContext().getTypeSize(Arg->
getType()) <
5157 return getContext().getIntPtrType();
5165void CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
5166 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
5167 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
5168 Inst->setMetadata(
"clang.arc.no_objc_arc_exceptions",
5169 CGM.getNoObjCARCExceptionsMetadata());
5175 const llvm::Twine &name) {
5176 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value *>(), name);
5182 ArrayRef<Address> args,
5183 const llvm::Twine &name) {
5184 SmallVector<llvm::Value *, 3> values;
5185 for (
auto arg : args)
5186 values.push_back(
arg.emitRawPointer(*
this));
5187 return EmitNounwindRuntimeCall(callee, values, name);
5192 ArrayRef<llvm::Value *> args,
5193 const llvm::Twine &name) {
5194 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
5195 call->setDoesNotThrow();
5202 const llvm::Twine &name) {
5203 return EmitRuntimeCall(callee, {},
name);
5208SmallVector<llvm::OperandBundleDef, 1>
5217 if (
auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
5218 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
5219 auto IID = CalleeFn->getIntrinsicID();
5220 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
5233 const llvm::Twine &name) {
5234 llvm::CallInst *call = Builder.CreateCall(
5235 callee, args, getBundlesForFunclet(callee.getCallee()), name);
5236 call->setCallingConv(getRuntimeCC());
5238 if (CGM.shouldEmitConvergenceTokens() && call->isConvergent())
5250 llvm::InvokeInst *invoke =
Builder.CreateInvoke(
5252 invoke->setDoesNotReturn();
5255 llvm::CallInst *call =
Builder.CreateCall(callee, args, BundleList);
5256 call->setDoesNotReturn();
5265 const Twine &name) {
5273 const Twine &name) {
5283 const Twine &Name) {
5288 llvm::CallBase *Inst;
5290 Inst =
Builder.CreateCall(Callee, Args, BundleList, Name);
5293 Inst =
Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
5300 if (
CGM.getLangOpts().ObjCAutoRefCount)
5301 AddObjCARCExceptionMetadata(Inst);
5306void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
5308 DeferredReplacements.push_back(
5309 std::make_pair(llvm::WeakTrackingVH(Old),
New));
5316[[nodiscard]] llvm::AttributeList
5317maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
5318 const llvm::AttributeList &Attrs,
5319 llvm::Align NewAlign) {
5320 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
5321 if (CurAlign >= NewAlign)
5323 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
5324 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
5325 .addRetAttribute(Ctx, AlignAttr);
5328template <
typename AlignedAttrTy>
class AbstractAssumeAlignedAttrEmitter {
5333 const AlignedAttrTy *AA =
nullptr;
5335 llvm::Value *Alignment =
nullptr;
5336 llvm::ConstantInt *OffsetCI =
nullptr;
5342 AA = FuncDecl->
getAttr<AlignedAttrTy>();
5347 [[nodiscard]] llvm::AttributeList
5348 TryEmitAsCallSiteAttribute(
const llvm::AttributeList &Attrs) {
5349 if (!AA || OffsetCI || CGF.
SanOpts.
has(SanitizerKind::Alignment))
5351 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
5356 if (!AlignmentCI->getValue().isPowerOf2())
5358 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
5361 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
5369 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
5373 AA->getLocation(), Alignment, OffsetCI);
5379class AssumeAlignedAttrEmitter final
5380 :
public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
5382 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_,
const Decl *FuncDecl)
5383 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5388 if (Expr *Offset = AA->getOffset()) {
5390 if (OffsetCI->isNullValue())
5397class AllocAlignAttrEmitter final
5398 :
public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
5400 AllocAlignAttrEmitter(CodeGenFunction &CGF_,
const Decl *FuncDecl,
5401 const CallArgList &CallArgs)
5402 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5406 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
5415 if (
auto *VT = dyn_cast<llvm::VectorType>(Ty))
5416 return VT->getPrimitiveSizeInBits().getKnownMinValue();
5417 if (
auto *AT = dyn_cast<llvm::ArrayType>(Ty))
5420 unsigned MaxVectorWidth = 0;
5421 if (
auto *ST = dyn_cast<llvm::StructType>(Ty))
5422 for (
auto *I : ST->elements())
5424 return MaxVectorWidth;
5431 llvm::CallBase **callOrInvoke,
bool IsMustTail,
5433 bool IsVirtualFunctionPointerThunk) {
5436 assert(Callee.isOrdinary() || Callee.isVirtual());
5443 llvm::FunctionType *IRFuncTy =
getTypes().GetFunctionType(CallInfo);
5445 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
5446 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5453 if ((TargetDecl->
hasAttr<AlwaysInlineAttr>() &&
5454 (TargetDecl->
hasAttr<TargetAttr>() ||
5458 TargetDecl->
hasAttr<TargetAttr>())))
5465 const FunctionDecl *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl);
5466 CGM.getTargetCodeGenInfo().checkFunctionCallABI(
CGM, Loc, CallerDecl,
5467 CalleeDecl, CallArgs, RetTy);
5474 if (llvm::StructType *ArgStruct = CallInfo.
getArgStruct()) {
5475 const llvm::DataLayout &DL =
CGM.getDataLayout();
5477 llvm::AllocaInst *AI;
5479 IP = IP->getNextNode();
5480 AI =
new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
"argmem",
5486 AI->setAlignment(Align.getAsAlign());
5487 AI->setUsedWithInAlloca(
true);
5488 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5489 ArgMemory =
RawAddress(AI, ArgStruct, Align);
5492 ClangToLLVMArgMapping IRFunctionArgs(
CGM.getContext(), CallInfo);
5500 bool NeedSRetLifetimeEnd =
false;
5506 if ((IsVirtualFunctionPointerThunk || IsMustTail) && RetAI.
isIndirect()) {
5508 IRFunctionArgs.getSRetArgNo(),
5516 if (NeedSRetLifetimeEnd)
5517 SRetAlloca = SRetPtr;
5520 if (IRFunctionArgs.hasSRetArg()) {
5533 IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
5551 assert(CallInfo.
arg_size() == CallArgs.size() &&
5552 "Mismatch between function signature & arguments.");
5555 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5556 I != E; ++I, ++info_it, ++ArgNo) {
5560 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5561 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5564 unsigned FirstIRArg, NumIRArgs;
5565 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5567 bool ArgHasMaybeUndefAttr =
5572 assert(NumIRArgs == 0);
5574 if (I->isAggregate()) {
5576 ? I->getKnownLValue().getAddress()
5577 : I->getKnownRValue().getAggregateAddress();
5578 llvm::Instruction *Placeholder =
5583 CGBuilderTy::InsertPoint IP =
Builder.saveIP();
5584 Builder.SetInsertPoint(Placeholder);
5597 deferPlaceholderReplacement(Placeholder,
Addr.getPointer());
5602 I->Ty,
getContext().getTypeAlignInChars(I->Ty),
5603 "indirect-arg-temp");
5604 I->copyInto(*
this,
Addr);
5613 I->copyInto(*
this,
Addr);
5620 assert(NumIRArgs == 1);
5621 if (I->isAggregate()) {
5631 ? I->getKnownLValue().getAddress()
5632 : I->getKnownRValue().getAggregateAddress();
5634 const llvm::DataLayout *TD = &
CGM.getDataLayout();
5636 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5637 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5638 TD->getAllocaAddrSpace()) &&
5639 "indirect argument must be in alloca address space");
5641 bool NeedCopy =
false;
5642 if (
Addr.getAlignment() < Align &&
5643 llvm::getOrEnforceKnownAlignment(
Addr.emitRawPointer(*
this),
5647 }
else if (I->hasLValue()) {
5648 auto LV = I->getKnownLValue();
5653 if (!isByValOrRef ||
5654 (LV.getAlignment() <
getContext().getTypeAlignInChars(I->Ty))) {
5658 if (isByValOrRef &&
Addr.getType()->getAddressSpace() !=
5667 auto *T = llvm::PointerType::get(
CGM.getLLVMContext(),
5675 if (ArgHasMaybeUndefAttr)
5676 Val =
Builder.CreateFreeze(Val);
5677 IRCallArgs[FirstIRArg] = Val;
5680 }
else if (I->getType()->isArrayParameterType()) {
5686 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5695 if (ArgHasMaybeUndefAttr)
5696 Val =
Builder.CreateFreeze(Val);
5697 IRCallArgs[FirstIRArg] = Val;
5702 CallLifetimeEndAfterCall.emplace_back(AI);
5705 I->copyInto(*
this, AI);
5710 assert(NumIRArgs == 0);
5718 assert(NumIRArgs == 1);
5720 if (!I->isAggregate())
5721 V = I->getKnownRValue().getScalarVal();
5724 I->hasLValue() ? I->getKnownLValue().getAddress()
5725 : I->getKnownRValue().getAggregateAddress());
5731 assert(!swiftErrorTemp.
isValid() &&
"multiple swifterror args");
5735 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
5742 llvm::Value *errorValue =
Builder.CreateLoad(swiftErrorArg);
5743 Builder.CreateStore(errorValue, swiftErrorTemp);
5748 V->getType()->isIntegerTy())
5755 if (FirstIRArg < IRFuncTy->getNumParams() &&
5756 V->getType() != IRFuncTy->getParamType(FirstIRArg)) {
5757 assert(
V->getType()->isPointerTy() &&
"Only pointers can mismatch!");
5761 if (ArgHasMaybeUndefAttr)
5763 IRCallArgs[FirstIRArg] =
V;
5767 llvm::StructType *STy =
5772 if (!I->isAggregate()) {
5774 I->copyInto(*
this, Src);
5776 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5777 : I->getKnownRValue().getAggregateAddress();
5787 llvm::TypeSize SrcTypeSize =
5788 CGM.getDataLayout().getTypeAllocSize(SrcTy);
5789 llvm::TypeSize DstTypeSize =
CGM.getDataLayout().getTypeAllocSize(STy);
5790 if (SrcTypeSize.isScalable()) {
5791 assert(STy->containsHomogeneousScalableVectorTypes() &&
5792 "ABI only supports structure with homogeneous scalable vector "
5794 assert(SrcTypeSize == DstTypeSize &&
5795 "Only allow non-fractional movement of structure with "
5796 "homogeneous scalable vector type");
5797 assert(NumIRArgs == STy->getNumElements());
5799 llvm::Value *StoredStructValue =
5801 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5802 llvm::Value *Extract =
Builder.CreateExtractValue(
5803 StoredStructValue, i, Src.
getName() +
".extract" + Twine(i));
5804 IRCallArgs[FirstIRArg + i] = Extract;
5807 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5808 uint64_t DstSize = DstTypeSize.getFixedValue();
5809 bool HasPFPFields =
getContext().hasPFPFields(I->Ty);
5815 if (HasPFPFields || SrcSize < DstSize) {
5826 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
5832 assert(NumIRArgs == STy->getNumElements());
5833 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5835 llvm::Value *LI =
Builder.CreateLoad(EltPtr);
5836 if (ArgHasMaybeUndefAttr)
5837 LI =
Builder.CreateFreeze(LI);
5838 IRCallArgs[FirstIRArg + i] = LI;
5843 assert(NumIRArgs == 1);
5851 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5856 if (ArgHasMaybeUndefAttr)
5857 Load =
Builder.CreateFreeze(Load);
5858 IRCallArgs[FirstIRArg] = Load;
5866 auto layout =
CGM.getDataLayout().getStructLayout(coercionType);
5868 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
5872 bool NeedLifetimeEnd =
false;
5873 if (I->isAggregate()) {
5874 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5875 : I->getKnownRValue().getAggregateAddress();
5878 RValue RV = I->getKnownRValue();
5882 auto scalarAlign =
CGM.getDataLayout().getPrefTypeAlign(scalarType);
5887 layout->getAlignment(), scalarAlign)),
5889 nullptr, &AllocaAddr);
5897 unsigned IRArgPos = FirstIRArg;
5898 unsigned unpaddedIndex = 0;
5899 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5900 llvm::Type *eltType = coercionType->getElementType(i);
5907 : unpaddedCoercionType,
5909 if (ArgHasMaybeUndefAttr)
5910 elt =
Builder.CreateFreeze(elt);
5911 IRCallArgs[IRArgPos++] = elt;
5913 assert(IRArgPos == FirstIRArg + NumIRArgs);
5915 if (NeedLifetimeEnd)
5921 unsigned IRArgPos = FirstIRArg;
5922 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5923 assert(IRArgPos == FirstIRArg + NumIRArgs);
5929 if (!I->isAggregate()) {
5931 I->copyInto(*
this, Src);
5933 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5934 : I->getKnownRValue().getAggregateAddress();
5940 CGM.getABIInfo().createCoercedLoad(Src, ArgInfo, *
this);
5941 IRCallArgs[FirstIRArg] = Load;
5947 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*
this);
5953 assert(IRFunctionArgs.hasInallocaArg());
5954 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5965 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5966 llvm::Value *Ptr) -> llvm::Function * {
5967 if (!CalleeFT->isVarArg())
5971 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5972 if (CE->getOpcode() == llvm::Instruction::BitCast)
5973 Ptr = CE->getOperand(0);
5976 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5980 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5984 if (OrigFT->isVarArg() ||
5985 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5986 OrigFT->getReturnType() != CalleeFT->getReturnType())
5989 for (
unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5990 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5996 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5998 IRFuncTy = OrigFn->getFunctionType();
6009 for (
unsigned i = 0; i < IRCallArgs.size(); ++i)
6010 LargestVectorWidth = std::max(LargestVectorWidth,
6015 llvm::AttributeList Attrs;
6016 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
6021 if (
CallingConv == llvm::CallingConv::X86_VectorCall &&
6023 CGM.Error(Loc,
"__vectorcall calling convention is not currently "
6028 if (FD->hasAttr<StrictFPAttr>())
6030 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
6035 if (FD->hasAttr<OptimizeNoneAttr>() &&
getLangOpts().FastMath)
6036 CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(),
6041 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoMerge);
6045 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
6050 !
CGM.getTargetCodeGenInfo().wouldInliningViolateFunctionCallABI(
6051 CallerDecl, CalleeDecl))
6053 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
6058 Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Convergent);
6067 !(TargetDecl && TargetDecl->
hasAttr<NoInlineAttr>()) &&
6068 !
CGM.getTargetCodeGenInfo().wouldInliningViolateFunctionCallABI(
6069 CallerDecl, CalleeDecl)) {
6071 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
6076 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
6083 CannotThrow =
false;
6092 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
6094 if (
auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
6095 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
6106 if (NeedSRetLifetimeEnd)
6114 if (
SanOpts.has(SanitizerKind::KCFI) &&
6115 !isa_and_nonnull<FunctionDecl>(TargetDecl))
6122 if (FD->hasAttr<StrictFPAttr>())
6124 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
6126 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*
this, TargetDecl);
6127 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
6129 AllocAlignAttrEmitter AllocAlignAttrEmitter(*
this, TargetDecl, CallArgs);
6130 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
6135 CI =
Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
6138 CI =
Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
6142 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
6143 CI->getCalledFunction()->getName().starts_with(
"_Z4sqrt")) {
6148 if (
CGM.getCodeGenOpts().CallGraphSection) {
6152 else if (
const auto *FPT =
6153 Callee.getAbstractInfo().getCalleeFunctionProtoType())
6157 "Cannot find the callee type to generate callee_type metadata.");
6161 CGM.createCalleeTypeMetadataForIcall(CST, *callOrInvoke);
6168 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(
CurFuncDecl)) {
6169 if (
const auto *A = FD->getAttr<CFGuardAttr>()) {
6170 if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
6171 !CI->getCalledFunction())
6177 CI->setAttributes(Attrs);
6178 CI->setCallingConv(
static_cast<llvm::CallingConv::ID
>(
CallingConv));
6182 if (!CI->getType()->isVoidTy())
6183 CI->setName(
"call");
6185 if (
CGM.shouldEmitConvergenceTokens() && CI->isConvergent())
6186 CI = addConvergenceControlToken(CI);
6189 LargestVectorWidth =
6195 if (!CI->getCalledFunction())
6196 PGO->valueProfile(
Builder, llvm::IPVK_IndirectCallTarget, CI, CalleePtr);
6200 if (
CGM.getLangOpts().ObjCAutoRefCount)
6201 AddObjCARCExceptionMetadata(CI);
6204 bool IsPPC =
getTarget().getTriple().isPPC();
6205 bool IsMIPS =
getTarget().getTriple().isMIPS();
6206 bool HasMips16 =
false;
6209 HasMips16 = TargetOpts.
FeatureMap.lookup(
"mips16");
6211 HasMips16 = llvm::is_contained(TargetOpts.
Features,
"+mips16");
6213 if (llvm::CallInst *
Call = dyn_cast<llvm::CallInst>(CI)) {
6214 if (TargetDecl && TargetDecl->
hasAttr<NotTailCalledAttr>())
6215 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
6216 else if (IsMustTail) {
6219 CGM.getDiags().Report(Loc, diag::err_aix_musttail_unsupported);
6222 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 0;
6223 else if (
Call->isIndirectCall())
6224 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 1;
6225 else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
6230 CGM.addUndefinedGlobalForTailCall(
6233 llvm::GlobalValue::LinkageTypes
Linkage =
CGM.getFunctionLinkage(
6235 if (llvm::GlobalValue::isWeakForLinker(
Linkage) ||
6236 llvm::GlobalValue::isDiscardableIfUnused(
Linkage))
6237 CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail)
6245 CGM.getDiags().Report(Loc, diag::err_mips_impossible_musttail) << 0;
6246 else if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
6247 CGM.addUndefinedGlobalForTailCall({FD, Loc});
6249 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
6263 bool NeedSrcLoc = TargetDecl->
hasAttr<ErrorAttr>();
6264 if (!NeedSrcLoc &&
CGM.getCodeGenOpts().ShowInliningChain) {
6265 if (
const auto *FD = dyn_cast<FunctionDecl>(TargetDecl))
6266 NeedSrcLoc = FD->isInlined() || FD->hasAttr<AlwaysInlineAttr>() ||
6268 FD->isInAnonymousNamespace();
6272 auto *MD = llvm::ConstantAsMetadata::get(
Line);
6273 CI->setMetadata(
"srcloc", llvm::MDNode::get(
getLLVMContext(), {MD}));
6282 if (CI->doesNotReturn()) {
6283 if (NeedSRetLifetimeEnd)
6287 if (
SanOpts.has(SanitizerKind::Unreachable)) {
6290 if (
auto *F = CI->getCalledFunction())
6291 F->removeFnAttr(llvm::Attribute::NoReturn);
6292 CI->removeFnAttr(llvm::Attribute::NoReturn);
6296 if (
SanOpts.hasOneOf(SanitizerKind::Address |
6297 SanitizerKind::KernelAddress)) {
6299 llvm::IRBuilder<>::InsertPointGuard IPGuard(
Builder);
6301 auto *FnType = llvm::FunctionType::get(
CGM.VoidTy,
false);
6302 llvm::FunctionCallee Fn =
6303 CGM.CreateRuntimeFunction(FnType,
"__asan_handle_no_return");
6309 Builder.ClearInsertionPoint();
6331 if (CI->doesNotThrow())
6334 diag::err_musttail_noexcept_mismatch);
6340 if (Cleanup && Cleanup->isFakeUse()) {
6341 CGBuilderTy::InsertPointGuard IPG(
Builder);
6343 Cleanup->getCleanup()->Emit(*
this, EHScopeStack::Cleanup::Flags());
6344 }
else if (!(Cleanup &&
6345 Cleanup->getCleanup()->isRedundantBeforeReturn())) {
6346 CGM.ErrorUnsupported(
MustTailCall,
"tail call skipping over cleanups");
6349 if (CI->getType()->isVoidTy())
6353 Builder.ClearInsertionPoint();
6359 if (swiftErrorTemp.
isValid()) {
6360 llvm::Value *errorResult =
Builder.CreateLoad(swiftErrorTemp);
6361 Builder.CreateStore(errorResult, swiftErrorArg);
6378 if (IsVirtualFunctionPointerThunk) {
6391 unsigned unpaddedIndex = 0;
6392 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
6393 llvm::Type *eltType = coercionType->getElementType(i);
6397 llvm::Value *elt = CI;
6398 if (requiresExtract)
6399 elt =
Builder.CreateExtractValue(elt, unpaddedIndex++);
6401 assert(unpaddedIndex == 0);
6402 Builder.CreateStore(elt, eltAddr);
6410 if (NeedSRetLifetimeEnd)
6427 llvm::Value *Real =
Builder.CreateExtractValue(CI, 0);
6428 llvm::Value *Imag =
Builder.CreateExtractValue(CI, 1);
6436 llvm::Value *
V = CI;
6437 if (
V->getType() != RetIRTy)
6447 if (
auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
6448 llvm::Value *
V = CI;
6449 if (
auto *ScalableSrcTy =
6450 dyn_cast<llvm::ScalableVectorType>(
V->getType())) {
6451 if (FixedDstTy->getElementType() ==
6452 ScalableSrcTy->getElementType()) {
6453 V =
Builder.CreateExtractVector(FixedDstTy,
V, uint64_t(0),
6463 getContext().getTypeInfoDataSizeInChars(RetTy).Width.getQuantity();
6467 DestIsVolatile =
false;
6468 DestSize =
getContext().getTypeSizeInChars(RetTy).getQuantity();
6478 CI, RetTy, StorePtr,
6492 DestIsVolatile =
false;
6494 CGM.getABIInfo().createCoercedStore(CI, StorePtr, RetAI, DestIsVolatile,
6501 llvm_unreachable(
"Invalid ABI kind for return argument");
6504 llvm_unreachable(
"Unhandled ABIArgInfo::Kind");
6509 if (Ret.isScalar() && TargetDecl) {
6510 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6511 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6517 LifetimeEnd.Emit(*
this, {});
6529 if (CalleeDecl && !CalleeDecl->
hasAttr<NoDebugAttr>() &&
6530 DI->getCallSiteRelatedAttrs() != llvm::DINode::FlagZero) {
6531 CodeGenFunction CalleeCGF(
CGM);
6533 Callee.getAbstractInfo().getCalleeDecl();
6534 CalleeCGF.
CurGD = CalleeGlobalDecl;
6537 DI->EmitFuncDeclForCallSite(
6538 CI, DI->getFunctionType(CalleeDecl, ResTy, Args), CalleeGlobalDecl);
6541 DI->addCallTargetIfVirtual(CalleeDecl, CI);
6567 if (
VE->isMicrosoftABI())
6568 return CGM.getABIInfo().EmitMSVAArg(*
this, VAListAddr, Ty, Slot);
6569 return CGM.getABIInfo().EmitVAArg(*
this, VAListAddr, Ty, Slot);
6574 CGF.disableDebugInfo();
6578 CGF.enableDebugInfo();
static ExtParameterInfoList getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument ArgNo to be non-null.
static CanQualTypeList getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsTargetDefaultMSABI)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from \TargetOpts and \F, and sets the result in \FuncAttr.
static llvm::Value * CreatePFPCoercedLoad(Address Src, QualType SrcFETy, llvm::Type *Ty, CodeGenFunction &CGF)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
SmallVector< CanQualType, 16 > CanQualTypeList
static std::pair< llvm::Value *, bool > CoerceScalableToFixed(CodeGenFunction &CGF, llvm::FixedVectorType *ToTy, llvm::ScalableVectorType *FromTy, llvm::Value *V, StringRef Name="")
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit parameters already stored.
static llvm::Value * CreateCoercedLoad(Address Src, QualType SrcFETy, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
SmallVector< FunctionProtoType::ExtParameterInfo, 16 > ExtParameterInfoList
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static CanQualTypeList getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
static bool CreatePFPCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst, CodeGenFunction &CGF)
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
static void addNoBuiltinAttributes(mlir::MLIRContext &ctx, mlir::NamedAttrList &attrs, const LangOptions &langOpts, const NoBuiltinAttr *nba=nullptr)
static void addDenormalModeAttrs(llvm::DenormalMode fpDenormalMode, llvm::DenormalMode fp32DenormalMode, mlir::NamedAttrList &attrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static unsigned getNoFPClassTestMask(const LangOptions &langOpts)
Compute the nofpclass mask for FP types based on language options.
static void appendParameterTypes(const CIRGenTypes &cgt, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > fpt)
Adds the formal parameters in FPT to the given prefix.
static const CIRGenFunctionInfo & arrangeFreeFunctionLikeCall(CIRGenTypes &cgt, CIRGenModule &cgm, const CallArgList &args, const FunctionType *fnType)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Result
Implement __builtin_bit_cast and related operations.
#define CC_VLS_CASE(ABI_VLEN)
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static StringRef getTriple(const Command &Job)
Maps Clang QualType instances to corresponding LLVM ABI type representations.
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
CanQualType getCanonicalSizeType() const
const TargetInfo & getTargetInfo() const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
std::vector< PFPField > findPFPFields(QualType Ty) const
Returns a list of PFP fields for the given type, including subfields in bases or other fields,...
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
This class is used for builtin types like 'int'.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const
ConstExprIterator const_arg_iterator
Represents a canonical, potentially-qualified type.
static CanQual< Type > CreateUnsafe(QualType Other)
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
static ABIArgInfo getIgnore()
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ TargetSpecific
TargetSpecific - Some argument types are passed as target specific types such as RISC-V's tuple type,...
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
static ABIArgInfo getIndirect(CharUnits Alignment, unsigned AddrSpace, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
static ABIArgInfo getZeroExtend(QualType Ty, llvm::Type *T=nullptr)
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
static ABIArgInfo getSignExtend(QualType Ty, llvm::Type *T=nullptr)
CharUnits getIndirectAlign() const
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
unsigned getAddressSpace() const
Return the address space that this address resides in.
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
llvm::StringRef getName() const
Return the IR name of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
const ArgInfo * const_arg_iterator
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr)
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
An abstract representation of regular/ObjC call/message targets.
const ParmVarDecl * getParamDecl(unsigned I) const
const Decl * getDecl() const
unsigned getNumParams() const
bool hasFunctionDecl() const
An object to manage conditionally-evaluated expressions.
void begin(CodeGenFunction &CGF)
void end(CodeGenFunction &CGF)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
Emits a call or invoke to the given noreturn runtime function.
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
void callCStructDestructor(LValue Dst)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
void addInstToSpecificSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom)
See CGDebugInfo::addInstToSpecificSourceAtom.
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
const CodeGen::CGBlockInfo * BlockInfo
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void CreateCoercedStore(llvm::Value *Src, QualType SrcFETy, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetInfo & getTarget() const
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
void EmitWritebacks(const CallArgList &Args)
EmitWriteback - Emit callbacks for function.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
llvm::BasicBlock * getInvokeDest()
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
CGDebugInfo * getDebugInfo()
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
void EmitLifetimeEnd(llvm::Value *Addr)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitAddressOfPFPField(Address RecordPtr, const PFPField &Field)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
llvm::Type * ConvertTypeForMem(QualType T)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc, uint64_t RetKeyInstructionsSourceAtom)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CallExpr * MustTailCall
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
This class organizes the cross-function state that is used while generating LLVM code.
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
void computeABIInfoUsingLib(CGFunctionInfo &FI)
Drive the experimental LLVMABI-based lowering path: map argument and return types into the LLVMABI li...
const llvm::DataLayout & getDataLayout() const
ObjCEntrypoints & getObjCEntrypoints() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
const llvm::abi::TargetInfo & getLLVMABITargetInfo(llvm::abi::TypeBuilder &TB)
Lazily build and return the LLVMABI library's TargetInfo for the current target.
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeDeviceKernelCallerDeclaration(QualType resultType, const FunctionArgList &args)
A device kernel caller function is an offload device entry point function with a target device depend...
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeFunctionDeclaration(const GlobalDecl GD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
A cleanup scope which generates the cleanup blocks lazily.
A saved depth on the scope stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddress() - Return the Value* of the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
static void initPointerAuthFnAttributes(const PointerAuthOptions &Opts, llvm::AttrBuilder &FuncAttrs)
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
const FunctionType * getFunctionType(bool BlocksToo=true) const
Looks through the Decl's underlying type to extract a FunctionType when possible.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
bool isZeroLengthBitField() const
Is this a zero-length bit-field?
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
@ SME_AgnosticZAStateMask
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
KernelReferenceKind getKernelReferenceKind() const
CXXDtorType getDtorType() const
const Decl * getDecl() const
This class represents temporary values used to represent inout and out arguments in HLSL.
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
bool isNull() const
Return true if this QualType doesn't point to a type yet.
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
field_iterator field_end() const
bool isParamDestroyedInCallee() const
field_iterator field_begin() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
llvm::StringMap< bool > FeatureMap
The map of which features have been enabled or disabled based on the command line.
The base class of the type hierarchy.
bool isIncompleteArrayType() const
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
RecordDecl * castAsRecordDecl() const
bool isMemberPointerType() const
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
const T * getAs() const
Member-template getAs<specific type>'.
bool isNullPtrType() const
bool isRecordType() const
bool isObjCRetainableType() const
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool This(InterpState &S, CodePtr OpPC)
@ Address
A pointer to a ValueDecl.
PRESERVE_NONE bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isa(CodeGen::Address addr)
static bool classof(const OMPClause *T)
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_VectorDeleting
Vector deleting dtor.
@ Dtor_Complete
Complete object dtor.
@ Dtor_Deleting
Deleting dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
U cast(CodeGen::Address addr)
@ Struct
The "struct" keyword introduces the elaborated-type-specifier.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
const Expr * WritebackExpr
An Expression (optional) that performs the writeback with any required casting.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::PointerType * VoidPtrTy
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
~DisableDebugLocationUpdates()
DisableDebugLocationUpdates(CodeGenFunction &CGF)
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.