#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace CodeGen;
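// The switch fragment below maps Clang's AST-level calling conventions
// (CallingConv) onto their LLVM IR equivalents; anything not explicitly
// handled falls through to the C default.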
  default: return llvm::CallingConv::C;

  case CC_Win64: return llvm::CallingConv::Win64;

  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;

  case CC_Swift: return llvm::CallingConv::Swift;

  case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
                                 unsigned totalArgs) {

  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  paramInfos.resize(prefixArgs);

    paramInfos.push_back(ParamInfo);

    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");

  paramInfos.resize(totalArgs);
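// Note: a pass_object_size parameter occupies two argument slots (the
// pointer itself plus its implicit size argument), which is why an empty
// ExtParameterInfo is appended above for the extra slot; the trailing
// resize then pads out any remaining suffix arguments.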
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());

  unsigned PrefixSize = prefix.size();

  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
                                     FTP->getExtInfo(), paramInfos, Required);

  return ::arrangeLLVMFunctionInfo(*this, false, argTypes,
  if (PcsAttr *PCS = D->getAttr<PcsAttr>())

  if (D->hasAttr<AArch64VectorPcsAttr>())

  if (D->hasAttr<AArch64SVEPcsAttr>())

  if (D->hasAttr<AMDGPUKernelCallAttr>())

  if (D->hasAttr<RISCVVectorCCAttr>())
  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,

  if (FD->hasAttr<CUDAGlobalAttr>()) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

      !Target.getCXXABI().hasConstructorVariants();

  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {

    if (auto Inherited = CD->getInheritedConstructor())

  if (!paramInfos.empty()) {

    paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,

    paramInfos.append(AddedArgs.Suffix,

      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())

                                 argTypes, extInfo, paramInfos, required);
  for (auto &arg : args)

  for (auto &arg : args)

                                  unsigned prefixArgs, unsigned totalArgs) {

                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {

  for (const auto &Arg : args)

  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

                                      FPT, TotalPrefixArgs + ExtraSuffixArgs)

  if (PassProtoArgs && FPT->hasExtParameterInfos()) {

                                 ArgTypes, Info, ParamInfos, Required);
  if (MD->isImplicitObjectMemberFunction())

  assert(isa<FunctionType>(FTy));

                                 {}, noProto->getExtInfo(), {},

        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))

  assert(MD->isVirtual() && "only methods have thunks");

  ArgTys.push_back(*FTP->param_type_begin());

    ArgTys.push_back(Context.IntTy);
                                       unsigned numExtraRequiredArgs,

  assert(args.size() >= numExtraRequiredArgs);

    if (proto->isVariadic())

    if (proto->hasExtParameterInfos())

                 cast<FunctionNoProtoType>(fnType))) {

  for (const auto &arg : args)

                                     paramInfos, required);

                          chainCall ? 1 : 0, chainCall);

  for (const auto &Arg : args)

                                            unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");

                                     paramInfos, required);

  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())

  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  assert(llvm::all_of(argTypes,

  llvm::FoldingSetNodeID ID;

  bool isDelegateCall =

                          info, paramInfos, required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);

                            info, paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;

  assert(inserted && "Recursively being processed?");

  if (CC == llvm::CallingConv::SPIR_KERNEL) {

    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

                                       bool chainCall, bool delegateCall,

  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->DelegateCall = delegateCall;

  FI->Required = required;

  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  FI->MaxVectorWidth = 0;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
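// CGFunctionInfo keeps its type array as a trailing allocation: slot 0 of
// the args buffer holds the return type and slots 1..NumArgs hold the
// parameter types, which is why the buffer is sized argTypes.size() + 1
// in the totalSizeToAlloc call above.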
struct TypeExpansion {
  enum TypeExpansionKind {

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}

struct ConstantArrayExpansion : TypeExpansion {

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;

struct RecordExpansion : TypeExpansion {

      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;

struct ComplexExpansion : TypeExpansion {

  static bool classof(const TypeExpansion *TE) {

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
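// The TypeExpansion hierarchy classifies how a Clang type is flattened into
// multiple IR arguments: constant arrays expand element-wise, records expand
// base-by-base and field-by-field, _Complex expands into its real and
// imaginary parts, and TEK_None means the type is passed as a single value.
// For example, a `struct S { int a; float b; };` under expansion becomes two
// IR arguments (i32, float) rather than one aggregate.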
static std::unique_ptr<TypeExpansion>

    return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),

           "Cannot expand structure with flexible array.");

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))

        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");

        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;

        Fields.push_back(LargestFD);

    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      assert(!CXXRD->isDynamicClass() &&
             "cannot expand vtable pointers in dynamic classes");
      llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))

        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);

    return std::make_unique<RecordExpansion>(std::move(Bases),

    return std::make_unique<ComplexExpansion>(CT->getElementType());

  return std::make_unique<NoExpansion>();
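// For unions, only the largest field is expanded (tracked via UnionSize /
// LargestFD above): all members share storage, so the widest field covers
// the full object representation.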
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

    for (auto BS : RExp->Bases)

    for (auto FD : RExp->Fields)

  if (isa<ComplexExpansion>(Exp.get()))

  assert(isa<NoExpansion>(Exp.get()));

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)

    for (auto FD : RExp->Fields)

  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {

    assert(isa<NoExpansion>(Exp.get()));

                                          ConstantArrayExpansion *CAE,

                                          llvm::function_ref<void(Address)> Fn) {
  for (int i = 0, n = CAE->NumElts; i < n; i++) {
                                       llvm::Function::arg_iterator &AI) {

         "Unexpected non-simple lvalue during struct expansion.");

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

      ExpandTypeFromArgs(BS->getType(), SubLV, AI);

    for (auto FD : RExp->Fields) {

      ExpandTypeFromArgs(FD->getType(), SubLV, AI);

  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;

    assert(isa<NoExpansion>(Exp.get()));
    llvm::Value *Arg = &*AI++;

    if (Arg->getType()->isPointerTy()) {
void CodeGenFunction::ExpandTypeToArgs(

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

        *this, CAExp, Addr, [&](Address EltAddr) {

          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,

    for (auto FD : RExp->Fields) {

      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,

  } else if (isa<ComplexExpansion>(Exp.get())) {

    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;

    assert(isa<NoExpansion>(Exp.get()));

    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
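// In the scalar (TEK_None) leaf case above, the emitted value is bitcast to
// the exact IR parameter type when the callee's signature is known, so
// expanded call arguments always line up with the IR function type.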
                                    const Twine &Name = "tmp") {

                                         llvm::StructType *SrcSTy,

  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  uint64_t FirstEltSize =

  if (FirstEltSize < DstSize &&
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))

  if (Val->getType() == Ty)

  if (isa<llvm::PointerType>(Val->getType())) {

    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))

  if (Val->getType() != DestIntTy) {

    if (DL.isBigEndian()) {

      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");

        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");

      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {

                                             DstSize.getFixedValue(), CGF);

  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {

  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedValue() >= DstSize.getFixedValue()) {

  if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {

      if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
          ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
          FixedSrcTy->getElementType()->isIntegerTy(8)) {
        ScalableDstTy = llvm::ScalableVectorType::get(
            FixedSrcTy->getElementType(),
            ScalableDstTy->getElementCount().getKnownMinValue() / 8);

      if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {

        auto *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);

            ScalableDstTy, PoisonVec, Load, Zero, "cast.scalable");
        if (ScalableDstTy != Ty)

      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));

                                      llvm::TypeSize DstSize,
                                      bool DstIsVolatile) {

  llvm::Type *SrcTy = Src->getType();

  if (llvm::StructType *DstSTy =

    assert(!SrcSize.isScalable());

                                     SrcSize.getFixedValue(), *this);

  if (SrcSize.isScalable() || SrcSize <= DstSize) {
    if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() &&

  } else if (llvm::StructType *STy =
                 dyn_cast<llvm::StructType>(Src->getType())) {

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {

      llvm::Value *Elt = Builder.CreateExtractValue(Src, i);

  } else if (SrcTy->isIntegerTy()) {

    llvm::Type *DstIntTy = Builder.getIntNTy(DstSize.getFixedValue() * 8);
static std::pair<llvm::Value *, bool>

                       llvm::ScalableVectorType *FromTy, llvm::Value *V,
                       StringRef Name = "") {

  if (FromTy->getElementType()->isIntegerTy(1) &&
      FromTy->getElementCount().isKnownMultipleOf(8) &&
      ToTy->getElementType() == CGF.Builder.getInt8Ty()) {
    FromTy = llvm::ScalableVectorType::get(
        ToTy->getElementType(),
        FromTy->getElementCount().getKnownMinValue() / 8);
    V = CGF.Builder.CreateBitCast(V, FromTy);

  if (FromTy->getElementType() == ToTy->getElementType()) {
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);

    V->setName(Name + ".coerce");
    V = CGF.Builder.CreateExtractVector(ToTy, V, Zero, "cast.fixed");
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;

  unsigned TotalIRArgs;

    unsigned PaddingArgIndex;

    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),

                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;

  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;

  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);

                 bool OnlyRequiredArgs);

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;

    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;

    auto &IRArgs = ArgInfo[ArgNo];

      IRArgs.PaddingArgIndex = IRArgNo++;

      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());

        IRArgs.NumberOfArgs = STy->getNumElements();

        IRArgs.NumberOfArgs = 1;

      IRArgs.NumberOfArgs = 1;

      IRArgs.NumberOfArgs = 0;

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;

    if (IRArgNo == 1 && SwapThisWithSRet)

  assert(ArgNo == ArgInfo.size());

    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
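// ClangToLLVMArgMapping records, for each Clang-level argument, where its
// IR arguments live: a single Clang argument may occupy zero IR slots
// (ignored or inalloca), one slot (direct or indirect), or several (struct
// coercion or expansion), and further slots may be claimed by sret,
// inalloca, and padding arguments. SwapThisWithSRet covers ABIs where the
// sret pointer is passed after `this` (slot 1) rather than first.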
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());

    switch (BT->getKind()) {

    case BuiltinType::Float:

    case BuiltinType::Double:

    case BuiltinType::LongDouble:

    if (BT->getKind() == BuiltinType::LongDouble)

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;

  assert(Inserted && "Recursively being processed?");
  llvm::Type *resultType = nullptr;

    llvm_unreachable("Invalid ABI kind for return argument");

    resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);

  if (IRFunctionArgs.hasSRetArg()) {

    ArgTypes[IRFunctionArgs.getSRetArgNo()] =

  if (IRFunctionArgs.hasInallocaArg())
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] =

  for (; it != ie; ++it, ++ArgNo) {

    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

      assert(NumIRArgs == 0);

      assert(NumIRArgs == 1);

      ArgTypes[FirstIRArg] = llvm::PointerType::get(

      assert(NumIRArgs == 1);
      ArgTypes[FirstIRArg] = llvm::PointerType::get(

      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);

        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);

        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;

      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;

        *ArgTypesIter++ = EltTy;

      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);

      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;

      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
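// GetFunctionType thus derives the final llvm::FunctionType directly from
// the ABI classification above: ignored/inalloca arguments contribute no
// slots, indirect arguments become pointers, direct struct coercions are
// flattened into their element types, and expanded arguments emit one slot
// per leaf type.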
                                          llvm::AttrBuilder &FuncAttrs,

    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    FuncAttrs.addAttribute("aarch64_pstate_sm_enabled");

    FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");

    FuncAttrs.addAttribute("aarch64_preserves_za");

    FuncAttrs.addAttribute("aarch64_in_za");

    FuncAttrs.addAttribute("aarch64_out_za");

    FuncAttrs.addAttribute("aarch64_inout_za");

    FuncAttrs.addAttribute("aarch64_preserves_zt0");

    FuncAttrs.addAttribute("aarch64_in_zt0");

    FuncAttrs.addAttribute("aarch64_out_zt0");

    FuncAttrs.addAttribute("aarch64_inout_zt0");
                                     const Decl *Callee) {

  for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
    AA->getAssumption().split(Attrs, ",");

  FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
                         llvm::join(Attrs.begin(), Attrs.end(), ","));

  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
    return ClassDecl->hasTrivialDestructor();

                                  const Decl *TargetDecl) {

  if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))

  if (!Module.getLangOpts().CPlusPlus)

  if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
    if (FDecl->isExternC())

  } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {

    if (VDecl->isExternC())

  return Module.getCodeGenOpts().StrictReturn ||
         !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
         Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
                                 llvm::DenormalMode FP32DenormalMode,
                                 llvm::AttrBuilder &FuncAttrs) {
  if (FPDenormalMode != llvm::DenormalMode::getDefault())
    FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());

  if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
    FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
                                            llvm::AttrBuilder &FuncAttrs) {

    StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,

    llvm::AttrBuilder &FuncAttrs) {

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.IndirectTlsSegRefs)
    FuncAttrs.addAttribute("indirect-tls-seg-refs");
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {

      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);

      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);

    switch (CodeGenOpts.getFramePointer()) {

      FuncAttrs.addAttribute("frame-pointer",

                                 CodeGenOpts.getFramePointer()));

    if (CodeGenOpts.LessPreciseFPMAD)
      FuncAttrs.addAttribute("less-precise-fpmad", "true");

    if (CodeGenOpts.NullPointerIsValid)
      FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);

      FuncAttrs.addAttribute("no-trapping-math", "true");

    if (LangOpts.NoHonorInfs)
      FuncAttrs.addAttribute("no-infs-fp-math", "true");
    if (LangOpts.NoHonorNaNs)
      FuncAttrs.addAttribute("no-nans-fp-math", "true");
    if (LangOpts.ApproxFunc)
      FuncAttrs.addAttribute("approx-func-fp-math", "true");
    if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
        LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
        (LangOpts.getDefaultFPContractMode() ==

         LangOpts.getDefaultFPContractMode() ==

      FuncAttrs.addAttribute("unsafe-fp-math", "true");
    if (CodeGenOpts.SoftFloat)
      FuncAttrs.addAttribute("use-soft-float", "true");
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    if (LangOpts.NoSignedZero)
      FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");

    const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates",
                             llvm::join(Recips, ","));

      FuncAttrs.addAttribute("prefer-vector-width",

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");
    if (CodeGenOpts.EnableSegmentedStacks)
      FuncAttrs.addAttribute("split-stack");

    if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);

    switch (CodeGenOpts.getZeroCallUsedRegs()) {
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
      FuncAttrs.removeAttribute("zero-call-used-regs");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
      FuncAttrs.addAttribute("zero-call-used-regs", "used");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
      FuncAttrs.addAttribute("zero-call-used-regs", "all");

    FuncAttrs.addAttribute(llvm::Attribute::Convergent);

  if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
      LangOpts.SYCLIsDevice) {
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

  if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
    FuncAttrs.addAttribute("save-reg-params");

    StringRef Var, Value;

    FuncAttrs.addAttribute(Var, Value);
                                                 const llvm::Function &F,

  auto FFeatures = F.getFnAttribute("target-features");

  llvm::StringSet<> MergedNames;

  MergedFeatures.reserve(TargetOpts.Features.size());

  auto AddUnmergedFeatures = [&](auto &&FeatureRange) {
    for (StringRef Feature : FeatureRange) {
      if (Feature.empty())

      assert(Feature[0] == '+' || Feature[0] == '-');
      StringRef Name = Feature.drop_front(1);
      bool Merged = !MergedNames.insert(Name).second;

        MergedFeatures.push_back(Feature);

  if (FFeatures.isValid())
    AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ','));
  AddUnmergedFeatures(TargetOpts.Features);

  if (!MergedFeatures.empty()) {
    llvm::sort(MergedFeatures);
    FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));
                                          bool WillInternalize) {

  llvm::AttrBuilder FuncAttrs(F.getContext());

  if (!TargetOpts.CPU.empty())
    FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
  if (!TargetOpts.TuneCPU.empty())
    FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);

                               CodeGenOpts, LangOpts,

  if (!WillInternalize && F.isInterposable()) {

    F.addFnAttrs(FuncAttrs);

  llvm::AttributeMask AttrsToRemove;

  llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
  llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
  llvm::DenormalMode Merged =

  if (DenormModeToMergeF32.isValid()) {

  if (Merged == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math");
  } else if (Merged != DenormModeToMerge) {

    FuncAttrs.addAttribute("denormal-fp-math",

  if (MergedF32 == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math-f32");
  } else if (MergedF32 != DenormModeToMergeF32) {

    FuncAttrs.addAttribute("denormal-fp-math-f32",

  F.removeFnAttrs(AttrsToRemove);

  F.addFnAttrs(FuncAttrs);
void CodeGenModule::getTrivialDefaultFunctionAttributes(
    StringRef Name, bool HasOptnone, bool AttrOnCallSite,
    llvm::AttrBuilder &FuncAttrs) {
  ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),

void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
                                                 bool HasOptnone,
                                                 bool AttrOnCallSite,
                                                 llvm::AttrBuilder &FuncAttrs) {
  getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,

  if (!AttrOnCallSite)

                                                      llvm::AttrBuilder &attrs) {
  getDefaultFunctionAttributes("", false,

  GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);

                                   const NoBuiltinAttr *NBA = nullptr) {
  auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {

    AttributeName += "no-builtin-";
    AttributeName += BuiltinName;
    FuncAttrs.addAttribute(AttributeName);

  if (LangOpts.NoBuiltin) {

    FuncAttrs.addAttribute("no-builtins");

  if (llvm::is_contained(NBA->builtinNames(), "*")) {
    FuncAttrs.addAttribute("no-builtins");

  llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2199 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2203 const llvm::DataLayout &DL,
const ABIArgInfo &AI,
2204 bool CheckCoerce =
true) {
2205 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
2211 if (!DL.typeSizeEqualsStoreSize(Ty))
2218 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2219 DL.getTypeSizeInBits(Ty)))
2243 if (
const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2245 if (
const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2254 unsigned NumRequiredArgs,
unsigned ArgNo) {
2255 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2260 if (ArgNo >= NumRequiredArgs)
2264 if (ArgNo < FD->getNumParams()) {
2265 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2266 if (Param && Param->
hasAttr<MaybeUndefAttr>())
2283 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2286 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2288 llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
2289 return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
2298 llvm::FPClassTest Mask = llvm::fcNone;
2299 if (LangOpts.NoHonorInfs)
2300 Mask |= llvm::fcInf;
2301 if (LangOpts.NoHonorNaNs)
2302 Mask |= llvm::fcNan;
                                                 llvm::AttributeList &Attrs) {
  if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
    Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory);
    llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(

    llvm::AttributeList &AttrList,

    bool AttrOnCallSite, bool IsThunk) {
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

    FuncAttrs.addAttribute("cmse_nonsecure_call");

  bool HasOptnone = false;

  const NoBuiltinAttr *NBA = nullptr;

  auto AddPotentialArgAccess = [&]() {
    llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);

      FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
                              llvm::MemoryEffects::argMemOnly());

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<ColdAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Cold);
    if (TargetDecl->hasAttr<HotAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Hot);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {

      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {

        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();

            (Kind == OO_New || Kind == OO_Array_New))
          RetAttrs.addAttribute(llvm::Attribute::NoAlias);

      const bool IsVirtualCall = MD && MD->isVirtual();

      if (!(AttrOnCallSite && IsVirtualCall)) {
        if (Fn->isNoReturn())
          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
        NBA = Fn->getAttr<NoBuiltinAttr>();

    if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {

      if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
        FuncAttrs.addAttribute(llvm::Attribute::NoMerge);

    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
      FuncAttrs.addAttribute("no_caller_saved_registers");
    if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
    if (TargetDecl->hasAttr<LeafAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
    if (TargetDecl->hasAttr<BPFFastCallAttr>())
      FuncAttrs.addAttribute("bpf_fastcall");

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      std::optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),

    if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {

        FuncAttrs.addAttribute("uniform-work-group-size", "true");

        FuncAttrs.addAttribute(
            "uniform-work-group-size",
            llvm::toStringRef(getLangOpts().OffloadUniformBlock));

    if (TargetDecl->hasAttr<CUDAGlobalAttr>() &&

      FuncAttrs.addAttribute("uniform-work-group-size", "true");

    if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>())
      FuncAttrs.addAttribute("aarch64_pstate_sm_body");
  getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);

    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<NoSplitStackAttr>())
      FuncAttrs.removeAttribute("split-stack");
    if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {

          TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
      FuncAttrs.removeAttribute("zero-call-used-regs");
      FuncAttrs.addAttribute(
          "zero-call-used-regs",
          ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));

    if (CodeGenOpts.NoPLT) {
      if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
        if (!Fn->isDefined() && !AttrOnCallSite) {
          FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);

    if (TargetDecl->hasAttr<NoConvergentAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::Convergent);

  if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      if (!FD->isExternallyVisible())
        FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",

  if (!AttrOnCallSite) {
    if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
      FuncAttrs.addAttribute("cmse_nonsecure_entry");

    auto shouldDisableTailCalls = [&] {

      if (CodeGenOpts.DisableTailCalls)

      if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
          TargetDecl->hasAttr<AnyX86InterruptAttr>())

      if (CodeGenOpts.NoEscapingBlockTailCalls) {
        if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
          if (!BD->doesNotEscape())

    if (shouldDisableTailCalls())
      FuncAttrs.addAttribute("disable-tail-calls", "true");

    GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  if (CodeGenOpts.EnableNoundefAttrs &&

    RetAttrs.addAttribute(llvm::Attribute::NoUndef);

      RetAttrs.addAttribute(llvm::Attribute::SExt);

      RetAttrs.addAttribute(llvm::Attribute::ZExt);

      RetAttrs.addAttribute(llvm::Attribute::NoExt);

      RetAttrs.addAttribute(llvm::Attribute::InReg);

      AddPotentialArgAccess();

    llvm_unreachable("Invalid ABI kind for return argument");

      RetAttrs.addDereferenceableAttr(

      if (getTypes().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        RetAttrs.addAttribute(llvm::Attribute::NonNull);

      llvm::Align Alignment =

      RetAttrs.addAlignmentAttr(Alignment);

  bool hasUsedSRet = false;

  if (IRFunctionArgs.hasSRetArg()) {

    SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
    SRETAttrs.addAttribute(llvm::Attribute::Writable);
    SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);

      SRETAttrs.addAttribute(llvm::Attribute::InReg);

    ArgAttrs[IRFunctionArgs.getSRetArgNo()] =

  if (IRFunctionArgs.hasInallocaArg()) {

    ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =

    auto IRArgs = IRFunctionArgs.getIRArgs(0);

    assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");

    if (!CodeGenOpts.NullPointerIsValid &&

      Attrs.addAttribute(llvm::Attribute::NonNull);

      Attrs.addDereferenceableOrNullAttr(

    llvm::Align Alignment =

    Attrs.addAlignmentAttr(Alignment);

    ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);

       I != E; ++I, ++ArgNo) {

    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {

      ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::AttributeSet::get(

              llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));

    if (CodeGenOpts.EnableNoundefAttrs &&

      Attrs.addAttribute(llvm::Attribute::NoUndef);

      Attrs.addAttribute(llvm::Attribute::SExt);

      Attrs.addAttribute(llvm::Attribute::ZExt);

      Attrs.addAttribute(llvm::Attribute::NoExt);

      Attrs.addAttribute(llvm::Attribute::Nest);

      Attrs.addAttribute(llvm::Attribute::InReg);
      Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));

        Attrs.addAttribute(llvm::Attribute::InReg);

      Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));

      if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
          Decl->getArgPassingRestrictions() ==

        Attrs.addAttribute(llvm::Attribute::NoAlias);

      AddPotentialArgAccess();

      Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));

      AddPotentialArgAccess();

        Attrs.addDereferenceableAttr(

        if (getTypes().getTargetAddressSpace(PTy) == 0 &&
            !CodeGenOpts.NullPointerIsValid)
          Attrs.addAttribute(llvm::Attribute::NonNull);

        llvm::Align Alignment =

        Attrs.addAlignmentAttr(Alignment);

    if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&

      llvm::Align Alignment =

      Attrs.addAlignmentAttr(Alignment);

        Attrs.addAttribute(llvm::Attribute::NoAlias);

        Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));

        Attrs.addAttribute(llvm::Attribute::NoAlias);

      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {

        Attrs.addDereferenceableAttr(info.Width.getQuantity());
        Attrs.addAlignmentAttr(info.Align.getAsAlign());

      Attrs.addAttribute(llvm::Attribute::SwiftError);

      Attrs.addAttribute(llvm::Attribute::SwiftSelf);

      Attrs.addAttribute(llvm::Attribute::SwiftAsync);

      Attrs.addAttribute(llvm::Attribute::NoCapture);

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(

  AttrList = llvm::AttributeList::get(
                                          llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");

                                             QualType ArgType, unsigned ArgNo) {

  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())

    if (NNAttr->isNonNull(ArgNo))
  if (FD->hasImplicitReturnZero()) {
    QualType RetTy = FD->getReturnType().getUnqualifiedType();

    llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);

  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());

  if (IRFunctionArgs.hasInallocaArg())
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),

  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);

  ArgVals.reserve(Args.size());

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");

  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {

        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      assert(NumIRArgs == 0);

      assert(NumIRArgs == 1);

        ParamAddr = AlignedTemp;

      auto AI = Fn->getArg(FirstIRArg);

      assert(NumIRArgs == 1);

      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {

                               PVD->getFunctionScopeIndex()) &&

          AI->addAttr(llvm::Attribute::NonNull);

        QualType OTy = PVD->getOriginalType();
        if (const auto *ArrTy =

            QualType ETy = ArrTy->getElementType();
            llvm::Align Alignment =

            AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
            uint64_t ArrSize = ArrTy->getZExtSize();

              Attrs.addDereferenceableAttr(
                  getContext().getTypeSizeInChars(ETy).getQuantity() *

              AI->addAttrs(Attrs);
            } else if (getContext().getTargetInfo().getNullPointerValue(

              AI->addAttr(llvm::Attribute::NonNull);

        } else if (const auto *ArrTy =

          QualType ETy = ArrTy->getElementType();
          llvm::Align Alignment =

          AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
          if (!getTypes().getTargetAddressSpace(ETy) &&

            AI->addAttr(llvm::Attribute::NonNull);

        const auto *AVAttr = PVD->getAttr<AlignValueAttr>();

          AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
        if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {

          llvm::ConstantInt *AlignmentCI =

              AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
          if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
            AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
            AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
                llvm::Align(AlignmentInt)));

          AI->addAttr(llvm::Attribute::NoAlias);

      assert(NumIRArgs == 1);

      llvm::Value *V = AI;

            V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));

        if (V->getType() != LTy)

      if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
        llvm::Value *ArgVal = Fn->getArg(FirstIRArg);
        if (auto *VecTyFrom =
                dyn_cast<llvm::ScalableVectorType>(ArgVal->getType())) {

              *this, VecTyTo, VecTyFrom, ArgVal, Arg->getName());

          assert(NumIRArgs == 1);
      llvm::StructType *STy =

          STy->getNumElements() > 1) {
        [[maybe_unused]] llvm::TypeSize StructSize =

        [[maybe_unused]] llvm::TypeSize PtrElementSize =

        if (STy->containsHomogeneousScalableVectorTypes()) {
          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with "
                 "homogeneous scalable vector type");

          STy->getNumElements() > 1) {

        llvm::TypeSize PtrElementSize =

        if (StructSize.isScalable()) {
          assert(STy->containsHomogeneousScalableVectorTypes() &&
                 "ABI only supports structure with homogeneous scalable vector "

          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with "
                 "homogeneous scalable vector type");
          assert(STy->getNumElements() == NumIRArgs);

          llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto *AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));

                Builder.CreateInsertValue(LoadedStructValue, AI, i);

          uint64_t SrcSize = StructSize.getFixedValue();
          uint64_t DstSize = PtrElementSize.getFixedValue();

          if (SrcSize <= DstSize) {

            assert(STy->getNumElements() == NumIRArgs);
            for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
              auto AI = Fn->getArg(FirstIRArg + i);
              AI->setName(Arg->getName() + ".coerce" + Twine(i));

          if (SrcSize > DstSize) {

        assert(NumIRArgs == 1);
        auto AI = Fn->getArg(FirstIRArg);
        AI->setName(Arg->getName() + ".coerce");

            llvm::TypeSize::getFixed(
                getContext().getTypeSizeInChars(Ty).getQuantity() -

      auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);

      unsigned argIndex = FirstIRArg;
      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);

        llvm::Value *elt = Fn->getArg(argIndex++);

        auto paramType = unpaddedStruct
                             ? unpaddedStruct->getElementType(unpaddedIndex++)
                             : unpaddedCoercionType;

        if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(eltType)) {
          if (auto *VecTyFrom = dyn_cast<llvm::ScalableVectorType>(paramType)) {

                *this, VecTyTo, VecTyFrom, elt, elt->getName());
            assert(Extracted && "Unexpected scalable to fixed vector coercion");

      assert(argIndex == FirstIRArg + NumIRArgs);

      auto FnArgIter = Fn->arg_begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = Fn->getArg(FirstIRArg + i);
        AI->setName(Arg->getName() + "." + Twine(i));
      assert(NumIRArgs == 0);

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)

    for (unsigned I = 0, E = Args.size(); I != E; ++I)

  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
                                                    llvm::Value *result) {

  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {

    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    if (generator->getNextNode() != bitcast)

    InstsToKill.push_back(bitcast);

  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

    doRetainAutorelease = true;
  } else if (call->getCalledOperand() ==

    doRetainAutorelease = false;

    llvm::Instruction *prev = call->getPrevNode();

    if (isa<llvm::BitCastInst>(prev)) {

      prev = prev->getPrevNode();

    assert(isa<llvm::CallInst>(prev));
    assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==

    InstsToKill.push_back(prev);

  result = call->getArgOperand(0);
  InstsToKill.push_back(call);

  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    InstsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);

  for (auto *I : InstsToKill)
    I->eraseFromParent();

  if (doRetainAutorelease)

  return CGF.Builder.CreateBitCast(result, resultType);
                                              llvm::Value *result) {

      dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;

  llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
  if (!retainCall || retainCall->getCalledOperand() !=

  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
      dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||

  llvm::Type *resultType = result->getType();

  assert(retainCall->use_empty());
  retainCall->eraseFromParent();

  return CGF.Builder.CreateBitCast(load, resultType);
                                           llvm::Value *result) {

  auto GetStoreIfValid = [&CGF,
                          ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != ReturnValuePtr ||

    assert(!SI->isAtomic() &&

  if (!ReturnValuePtr->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;

    for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
      if (isa<llvm::BitCastInst>(&I))

      if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
        if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)

      return GetStoreIfValid(&I);

  llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
  if (!store) return nullptr;

  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();

  while (IP != StoreBB) {
    if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
                        int BitWidth, int CharWidth) {
  assert(CharWidth <= 64);
  assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);

  if (BitOffset >= CharWidth) {
    Pos += BitOffset / CharWidth;
    BitOffset = BitOffset % CharWidth;

  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;

  while (BitWidth >= CharWidth) {

    BitWidth -= CharWidth;

    Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;

                        int StorageSize, int BitOffset, int BitWidth,
                        int CharWidth, bool BigEndian) {

  setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);

    std::reverse(TmpBits.begin(), TmpBits.end());

  for (uint64_t V : TmpBits)
    Bits[StorageOffset++] |= V;

                BFI.Size, CharWidth,

    auto Src = TmpBits.begin();
    auto Dst = Bits.begin() + Offset + I * Size;
    for (int J = 0; J < Size; ++J)

  std::fill_n(Bits.begin() + Offset, Size,

                                   int Pos, int Size, int CharWidth,

    for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;

      Mask = (Mask << CharWidth) | *P;

    auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;

      Mask = (Mask << CharWidth) | *--P;
                                                       llvm::IntegerType *ITy,

  assert(Src->getType() == ITy);
  assert(ITy->getScalarSizeInBits() <= 64);

  int Size = DataLayout.getTypeStoreSize(ITy);

  return Builder.CreateAnd(Src, Mask, "cmse.clear");

                                                       llvm::ArrayType *ATy,

  int Size = DataLayout.getTypeStoreSize(ATy);

      ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;

  llvm::Value *R = llvm::PoisonValue::get(ATy);
  for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {

                                          DataLayout.isBigEndian());
    MaskIndex += CharsPerElt;
    llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
    llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
    R = Builder.CreateInsertValue(R, T1, I);
  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;

    llvm::Function::arg_iterator EI = CurFn->arg_end();

    llvm::Value *ArgStruct = &*EI;

        cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();

    auto AI = CurFn->arg_begin();

      if (llvm::StoreInst *SI =

        RetDbgLoc = SI->getDebugLoc();

        RV = SI->getValueOperand();
        SI->eraseFromParent();

      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
        RT = FD->getReturnType();
      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
        RT = MD->getReturnType();

        llvm_unreachable("Unexpected function/method type");

    auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);

    unsigned unpaddedIndex = 0;
    for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
      auto coercedEltType = coercionType->getElementType(i);

          unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
                         : unpaddedCoercionType,

      results.push_back(elt);

    if (results.size() == 1) {

      RV = llvm::PoisonValue::get(returnType);
      for (unsigned i = 0, e = results.size(); i != e; ++i) {
        RV = Builder.CreateInsertValue(RV, results[i], i);

    llvm_unreachable("Invalid ABI kind for return argument");

  llvm::Instruction *Ret;

    auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());

  Ret->setDebugLoc(std::move(RetDbgLoc));
  ReturnsNonNullAttr *RetNNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))

  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())

    assert(!requiresReturnValueNullabilityCheck() &&
           "Cannot check nullability and the nonnull attribute");
    AttrLoc = RetNNAttr->getLocation();
    CheckKind = SanitizerKind::ReturnsNonnullAttribute;
    Handler = SanitizerHandler::NonnullReturn;

    if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
      if (auto *TSI = DD->getTypeSourceInfo())

        AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityReturn;
    Handler = SanitizerHandler::NullabilityReturn;

  SanitizerScope SanScope(this);

  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  if (requiresReturnValueNullabilityCheck())

        Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);

  llvm::Value *Cond = Builder.CreateIsNotNull(RV);

  llvm::Value *DynamicData[] = {SLocPtr};
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
  llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
  llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);

  if (type->isReferenceType()) {

      param->hasAttr<NSConsumedAttr>() &&
      type->isObjCRetainableType()) {

      llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));

      CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));

         "cleanup for callee-destructed param not recorded");

  llvm::Instruction *isActive = Builder.CreateUnreachable();

  return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);

         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  if (!provablyNonNull) {

    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);

                                      "icr.writeback-cast");

  if (writeback.ToUse) {

  if (!provablyNonNull)

  for (const auto &I : llvm::reverse(Cleanups)) {

    I.IsActiveIP->eraseFromParent();

    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  llvm::PointerType *destType =

  llvm::Type *destElemType =

  CodeGenFunction::ConditionalEvaluation condEval(CGF);

      llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  llvm::Value *finalArgument;

  if (provablyNonNull) {

    finalArgument = CGF.Builder.CreateSelect(
        isNull, llvm::ConstantPointerNull::get(destType),

    originBB = CGF.Builder.GetInsertBlock();

    CGF.Builder.CreateCondBr(isNull, contBB, copyBB);

    condEval.begin(CGF);

  llvm::Value *valueToUse = nullptr;

    src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");

  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();

    llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,

    phiToUse->addIncoming(valueToUse, copyBB);
    phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),

    valueToUse = phiToUse;

  StackBase = CGF.Builder.CreateStackSave("inalloca.save");

  CGF.Builder.CreateStackRestore(StackBase);
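// inalloca call sequences bracket the argument area with stacksave /
// stackrestore: the save captures the stack pointer before the argument
// block is allocated, and the restore after the call tears the whole block
// down in one step.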
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||

  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;

  const NonNullAttr *NNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::NonnullAttribute))

  bool CanCheckNullability = false;
  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
      !PVD->getType()->isRecordType()) {
    auto Nullability = PVD->getType()->getNullability();
    CanCheckNullability = Nullability &&

                          PVD->getTypeSourceInfo();

  if (!NNAttr && !CanCheckNullability)

    AttrLoc = NNAttr->getLocation();
    CheckKind = SanitizerKind::NonnullAttribute;
    Handler = SanitizerHandler::NonnullArg;

    AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityArg;
    Handler = SanitizerHandler::NullabilityArg;

  SanitizerScope SanScope(this);

  llvm::Constant *StaticData[] = {

      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),

  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, {});
void CodeGenFunction::EmitNonNullArgCheck(Address Addr, QualType ArgType,
                                          SourceLocation ArgLoc,
                                          AbstractCallee AC, unsigned ParmNum) {
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
                         SanOpts.has(SanitizerKind::NullabilityArg)))
    return;
  EmitNonNullArgCheck(RValue::get(Addr, *this), ArgType, ArgLoc, AC, ParmNum);
}
  return llvm::any_of(ArgTypes, [&](QualType Ty) {
    return isInAllocaArgument(CGM.getCXXABI(), Ty);
  });
}

  if (const auto *classDecl = dyn_cast<ObjCInterfaceDecl>(dc))
    return classDecl->getTypeParamListAsWritten();
  if (const auto *catDecl = dyn_cast<ObjCCategoryDecl>(dc))
    return catDecl->getTypeParamList();
void CodeGenFunction::EmitCallArgs(
    CallArgList &Args, PrototypeWrapper Prototype,
    llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
    AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
  SmallVector<QualType, 16> ArgTypes;

  assert((ParamsToSkip == 0 || Prototype.P) &&
         "Can't skip parameters if type info is not provided");

  // This variable only captures *explicitly* written calling conventions.
  CallingConv ExplicitCC = CC_C;

  // First, if a prototype was provided, use those argument types.
  bool IsVariadic = false;
  if (Prototype.P) {
    const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
    if (MD) {
      IsVariadic = MD->isVariadic();
      ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
                      MD->param_type_end());
    } else {
      const auto *FPT = cast<const FunctionProtoType *>(Prototype.P);
      IsVariadic = FPT->isVariadic();
      ExplicitCC = FPT->getExtInfo().getCC();
      ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
                      FPT->param_type_end());
    }
#ifndef NDEBUG
    bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
    CallExpr::const_arg_iterator Arg = ArgRange.begin();
    for (QualType Ty : ArgTypes) {
      assert(Arg != ArgRange.end() && "Running over edge of argument list!");
      assert(
          (isGenericMethod || Ty->isVariablyModifiedType() ||
           Ty.getNonReferenceType()->isObjCRetainableType() ||
           getContext()
                   .getCanonicalType(Ty.getNonReferenceType())
                   .getTypePtr() ==
               getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
          "type mismatch in call argument!");
      ++Arg;
    }
    assert((Arg == ArgRange.end() || IsVariadic) &&
           "Extra arguments in non-variadic function!");
#endif
  }
  for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
    ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
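  // Note (summary of the surrounding logic): in the Microsoft C++ ABI,
  // arguments are destroyed left-to-right in the callee, so they are
  // evaluated right-to-left here; e.g. in `f(g(), h())` the call to h()
  // may be evaluated first. The Args vector is un-reversed after the loop
  // below so it still matches the LLVM IR parameter order.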
  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
                                         RValue EmittedArg) {
    if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
      return;
    auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
    if (PS == nullptr)
      return;

    const auto &Context = getContext();
    auto SizeTy = Context.getSizeType();
    auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
    assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
    llvm::Value *V = evaluateOrEmitBuiltinObjectSize(
        Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic());
    Args.add(RValue::get(V), SizeTy);
    // If we're emitting args right-to-left, keep the implicit size argument
    // adjacent to its pointer argument.
    if (!LeftToRight)
      std::swap(Args.back(), *(&Args.back() - 1));
  };
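  // Illustrative sketch (hypothetical source): given
  //   void fill(char *buf __attribute__((pass_object_size(0))));
  // a call fill(arr) also passes a hidden size_t argument computed as if by
  // __builtin_object_size(arr, 0), emitted immediately after the pointer.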
  // Insert a stack save if we're going to need any inalloca args.
  if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
    assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
           "inalloca only supported on x86");
    Args.allocateArgumentMemory(*this);
  }
  // Evaluate each argument in the appropriate order.
  size_t CallArgsStart = Args.size();
  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    unsigned Idx = LeftToRight ? I : E - I - 1;
    CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
    unsigned InitialArgSize = Args.size();
    assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
            getContext().hasSameUnqualifiedType((*Arg)->getType(),
                                                ArgTypes[Idx]) ||
            (isa<ObjCMethodDecl>(AC.getDecl()) &&
             isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
           "Argument and parameter types don't match");
    EmitCallArg(Args, *Arg, ArgTypes[Idx]);
    assert(InitialArgSize + 1 == Args.size() &&
           "The code below depends on only adding one arg per EmitCallArg");
    (void)InitialArgSize;
    // Since pointer arguments are never emitted as LValues, it is safe to
    // emit the non-null argument check for r-values only.
    if (!Args.back().hasLValue()) {
      RValue RVArg = Args.back().getKnownRValue();
      EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
                          ParamsToSkip + Idx);
      // @llvm.objectsize should never have side-effects and shouldn't need
      // destruction/cleanups, so we can safely "emit" it after its arg,
      // regardless of right-to-leftness.
      MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
    }
  }

  if (!LeftToRight) {
    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function signature.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
  }
}
  DestroyUnpassedArg(Address Addr, QualType Ty) : Addr(Addr), Ty(Ty) {}
struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo = false;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};
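// Illustrative note (hypothetical source): for `void f(int x = g());`, the
// call to g() synthesized at each call site textually comes from the
// declaration, so this RAII helper suspends debug-info updates while such a
// default-argument expression is emitted, to avoid attributing those
// instructions to misleading call-site locations.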
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
  if (const ObjCIndirectCopyRestoreExpr *CRE =
          dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    return emitWritebackArg(*this, args, CRE);
  }
4737 "reference binding to unmaterialized r-value!");
  // In the Microsoft C++ ABI, aggregate arguments are destructed by the
  // callee. We still push an EH-only cleanup in case we unwind before we
  // make it to the call.
  if (type->isRecordType() &&
      type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
    // If we're using inalloca, use the argument memory; otherwise a temporary.
    AggValueSlot Slot = args.isUsingInAlloca()
        ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp");

    bool DestroyedInCallee = true, NeedsCleanup = true;
    if (const auto *RD = type->getAsCXXRecordDecl())
      DestroyedInCallee = RD->hasNonTrivialDestructor();
    else
      NeedsCleanup = type.isDestructedType();

    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee && NeedsCleanup) {
      pushFullExprCleanup<DestroyUnpassedArg>(NormalAndEHCleanup,
                                              Slot.getAddress(), type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue &&
      !type->isArrayParameterType()) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.addUncopiedAggregate(L, type);
    return;
  }
}
QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  // System headers on Windows define NULL to 0 instead of 0LL on Win64, so
  // MSVC implicitly widens integral null pointer constants passed to varargs
  // functions to pointer-sized integers.
  if (!getTarget().getTriple().isOSWindows())
    return Arg->getType();
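// Illustrative sketch (hypothetical source): on Win64, `printf("%p", NULL)`
// passes NULL (a plain `int` 0 in MSVC headers) through varargs; widening it
// to a 64-bit integer here matches MSVC's behavior and keeps %p from reading
// garbage in the high bits.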
void CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}
llvm::CallInst *CodeGenFunction::EmitNounwindRuntimeCall(
    llvm::FunctionCallee callee, const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value *>(), name);
}

llvm::CallInst *CodeGenFunction::EmitNounwindRuntimeCall(
    llvm::FunctionCallee callee, ArrayRef<Address> args,
    const llvm::Twine &name) {
  SmallVector<llvm::Value *, 3> values;
  for (auto arg : args)
    values.push_back(arg.emitRawPointer(*this));
  return EmitNounwindRuntimeCall(callee, values, name);
}

llvm::CallInst *CodeGenFunction::EmitNounwindRuntimeCall(
    llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args,
    const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}
llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
                                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, std::nullopt, name);
}
  // Skip intrinsics which cannot throw (as long as they don't lower to
  // regular function calls in the course of IR transformations).
  if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
    if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
      auto IID = CalleeFn->getIntrinsicID();
      if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
        return BundleList;
    }
  }
llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
                                                 ArrayRef<llvm::Value *> args,
                                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(
      callee, args, getBundlesForFunclet(callee.getCallee()), name);
  call->setCallingConv(getRuntimeCC());
  return call;
}
  if (getInvokeDest()) {
    llvm::InvokeInst *invoke = Builder.CreateInvoke(
        callee, getUnreachableBlock(), getInvokeDest(), args, BundleList);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
llvm::CallBase *CodeGenFunction::EmitRuntimeCallOrInvoke(
    llvm::FunctionCallee callee, const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value *>(), name);
}

llvm::CallBase *CodeGenFunction::EmitRuntimeCallOrInvoke(
    llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args,
    const Twine &name) {
  llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
                                                  ArrayRef<llvm::Value *> Args,
                                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(Callee.getCallee());
  llvm::CallBase *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
                                Name);
    EmitBlock(ContBB);
  }
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);
  return Inst;
}
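// Illustrative note: inside a scope with an active EH landing pad or funclet,
// the helper above emits an `invoke` so the unwind edge is modeled, e.g.
//   %r = invoke i32 @f() to label %invoke.cont unwind label %lpad
// whereas outside any such scope a plain `call` suffices. Names hypothetical.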
void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(
      std::make_pair(llvm::WeakTrackingVH(Old), New));
}
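// Illustrative note: inalloca argument slots are emitted before the packed
// argument alloca exists, so stores are issued against a poison placeholder
// (see the placeholder-slot creation earlier in this file) and recorded
// here; once the real argument memory is known, each placeholder is replaced
// via RAUW.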
[[nodiscard]] llvm::AttributeList
maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
                                const llvm::AttributeList &Attrs,
                                llvm::Align NewAlign) {
  llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
  if (CurAlign >= NewAlign)
    return Attrs;
  llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
  return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
      .addRetAttribute(Ctx, AlignAttr);
}
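// Example (restating the logic above): if Attrs already carries `align 16`
// on the return value, a NewAlign of 8 leaves Attrs untouched, while a
// NewAlign of 32 replaces the attribute with `align 32`.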
template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
protected:
  CodeGenFunction &CGF;

  /// We do nothing if this is, or becomes, nullptr.
  const AlignedAttrTy *AA = nullptr;

  llvm::Value *Alignment = nullptr;      // May or may not be a constant.
  llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.

  AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : CGF(CGF_) {
    if (!FuncDecl)
      return;
    AA = FuncDecl->getAttr<AlignedAttrTy>();
  }

public:
  /// If possible, materialize the alignment as a call-site return attribute.
  [[nodiscard]] llvm::AttributeList
  TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
    if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
      return Attrs;
    const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
    if (!AlignmentCI)
      return Attrs;
    // A non-power-of-2 alignment is UB; emit it via @llvm.assume instead.
    if (!AlignmentCI->getValue().isPowerOf2())
      return Attrs;
    llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
        CGF.getLLVMContext(), Attrs,
        llvm::Align(
            AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
    AA = nullptr; // We're done. Disallow doing anything else.
    return NewAttrs;
  }

  /// Fallback: emit the alignment as an assumption on the return value.
  void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
    if (!AA)
      return;
    CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
                                AA->getLocation(), Alignment, OffsetCI);
    AA = nullptr;
  }
};
class AssumeAlignedAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
public:
  AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // It is guaranteed that the alignment/offset are constants.
    Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
    if (Expr *Offset = AA->getOffset()) {
      OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
      if (OffsetCI->isNullValue())
        OffsetCI = nullptr;
    }
  }
};

class AllocAlignAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
public:
  AllocAlignAttrEmitter(CodeGenFunction &CGF_, const FunctionDecl *FuncDecl,
                        const CallArgList &CallArgs)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
                    .getRValue(CGF)
                    .getScalarVal();
  }
};
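// Illustrative sketch (hypothetical source): for
//   void *my_alloc(size_t n, size_t align) __attribute__((alloc_align(2)));
// the second argument's value is recorded here; the returned pointer is then
// annotated with that (possibly non-constant) alignment, either as a
// call-site return attribute or via an @llvm.assume alignment assumption.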
static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
  if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
    return VT->getPrimitiveSizeInBits().getKnownMinValue();
  if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
    return getMaxVectorWidth(AT->getElementType());