#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace CodeGen;
  default:
    return llvm::CallingConv::C;

  case CC_Win64:
    return llvm::CallingConv::Win64;
  case CC_AAPCS:
    return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;

  case CC_Swift:
    return llvm::CallingConv::Swift;
  case CC_M68kRTD:
    return llvm::CallingConv::M68k_RTD;
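// The default case maps any unhandled convention to the C calling convention;
// each explicit case simply forwards a Clang-level CC enumerator to its LLVM
// counterpart (for example, a function declared with the AAPCS-VFP pcs
// attribute ends up as llvm::CallingConv::ARM_AAPCS_VFP).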
                                     unsigned totalArgs) {
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  paramInfos.resize(prefixArgs);

    paramInfos.push_back(ParamInfo);

    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  paramInfos.resize(totalArgs);
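// A pass_object_size parameter occupies two slots here: the pointer itself
// plus an implicit size argument, which is why an empty ExtParameterInfo is
// appended after any parameter whose info has hasPassObjectSize() set.
// At the source level, for illustration:
//   void f(void *p __attribute__((pass_object_size(0))));
// lowers to an IR function taking the pointer plus a size_t-sized integer.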
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());

  unsigned PrefixSize = prefix.size();

  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
                                 FTP->getExtInfo(), paramInfos, Required);

  return ::arrangeLLVMFunctionInfo(*this, false, argTypes,
  if (PcsAttr *PCS = D->getAttr<PcsAttr>())

  if (D->hasAttr<AArch64VectorPcsAttr>())

  if (D->hasAttr<AArch64SVEPcsAttr>())

  if (D->hasAttr<AMDGPUKernelCallAttr>())

  if (D->hasAttr<RISCVVectorCCAttr>())

  return ::arrangeLLVMFunctionInfo(*this, true, argTypes,
  if (FD->hasAttr<CUDAGlobalAttr>()) {

  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
      !Target.getCXXABI().hasConstructorVariants();
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    if (auto Inherited = CD->getInheritedConstructor())

  if (!paramInfos.empty()) {
    paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
    paramInfos.append(AddedArgs.Suffix,

      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())

                                 argTypes, extInfo, paramInfos, required);
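// Structor signatures can grow implicit arguments supplied by the C++ ABI
// (AddedArgs.Prefix/Suffix) -- e.g. a VTT parameter in the Itanium ABI, or an
// is-most-derived flag in the Microsoft ABI. They are spliced in after the
// `this` pointer, which is why the insertion point above is begin() + 1.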
  for (auto &arg : args)

  for (auto &arg : args)

                                unsigned prefixArgs, unsigned totalArgs) {
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {

  for (const auto &Arg : args)

  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

          FPT, TotalPrefixArgs + ExtraSuffixArgs)

  if (PassProtoArgs && FPT->hasExtParameterInfos()) {

                                 ArgTypes, Info, ParamInfos, Required);
  if (MD->isImplicitObjectMemberFunction())

  assert(isa<FunctionType>(FTy));

      std::nullopt, noProto->getExtInfo(), {},
                                           I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
  assert(MD->isVirtual() && "only methods have thunks");

  ArgTys.push_back(*FTP->param_type_begin());

  ArgTys.push_back(Context.IntTy);
                                          unsigned numExtraRequiredArgs,
  assert(args.size() >= numExtraRequiredArgs);

    if (proto->isVariadic())

    if (proto->hasExtParameterInfos())

                   cast<FunctionNoProtoType>(fnType))) {

  for (const auto &arg : args)

                                     paramInfos, required);

                              chainCall ? 1 : 0, chainCall);
  for (const auto &Arg : args)

                                        unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");

                                 paramInfos, required);
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())

  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  assert(llvm::all_of(argTypes,

  llvm::FoldingSetNodeID ID;

  bool isDelegateCall =

                  info, paramInfos, required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);

                            info, paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  assert(inserted && "Recursively being processed?");

  if (CC == llvm::CallingConv::SPIR_KERNEL) {

    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");
                                       bool chainCall, bool delegateCall,

  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->DelegateCall = delegateCall;
  FI->Required = required;
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  FI->MaxVectorWidth = 0;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
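// CGFunctionInfo uses a trailing-objects allocation: one buffer holds the
// return type at index 0 followed by one ArgInfo per argument, and a second
// optional buffer holds the ExtParameterInfos, so the whole signature lives
// in a single variable-sized allocation keyed by the FoldingSet profile
// computed in arrangeLLVMFunctionInfo above.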
struct TypeExpansion {
  enum TypeExpansionKind {

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}

struct ConstantArrayExpansion : TypeExpansion {

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;

struct RecordExpansion : TypeExpansion {

      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;

struct ComplexExpansion : TypeExpansion {

  static bool classof(const TypeExpansion *TE) {

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
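// This hierarchy uses LLVM-style RTTI: every subclass records its
// TypeExpansionKind and exposes classof(), so clients can use isa<> and
// dyn_cast<> on a TypeExpansion without enabling C++ RTTI. For example:
//   if (auto *RExp = dyn_cast<RecordExpansion>(Exp.get()))
//     ... walk RExp->Bases and RExp->Fields ...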
static std::unique_ptr<TypeExpansion>

    return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),

           "Cannot expand structure with flexible array.");

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))

        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");

        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;

        Fields.push_back(LargestFD);

      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))

        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);

    return std::make_unique<RecordExpansion>(std::move(Bases),

    return std::make_unique<ComplexExpansion>(CT->getElementType());

  return std::make_unique<NoExpansion>();
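// Expansion order above: constant arrays expand per element; records expand
// into their bases and non-bit-field fields (for a union, only the largest
// field is kept); _Complex expands into its two scalar halves; anything else
// gets NoExpansion and is passed as-is.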
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

    for (auto BS : RExp->Bases)

    for (auto FD : RExp->Fields)

  if (isa<ComplexExpansion>(Exp.get()))

  assert(isa<NoExpansion>(Exp.get()));
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)

    for (auto FD : RExp->Fields)

  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {

    assert(isa<NoExpansion>(Exp.get()));
                                      ConstantArrayExpansion *CAE,
                                      llvm::function_ref<void(Address)> Fn) {
  for (int i = 0, n = CAE->NumElts; i < n; i++) {
                                        llvm::Function::arg_iterator &AI) {
         "Unexpected non-simple lvalue during struct expansion.");

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    for (auto FD : RExp->Fields) {
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;

    assert(isa<NoExpansion>(Exp.get()));
    llvm::Value *Arg = &*AI++;

    if (Arg->getType()->isPointerTy()) {
void CodeGenFunction::ExpandTypeToArgs(

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
        *this, CAExp, Addr, [&](Address EltAddr) {
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
    for (auto FD : RExp->Fields) {
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
  } else if (isa<ComplexExpansion>(Exp.get())) {
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;

    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
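// ExpandTypeToArgs is the call-site mirror of ExpandTypeFromArgs above: the
// former scatters one Clang-level value into consecutive slots of IRCallArgs,
// while the latter reassembles a parameter from consecutive entries of the
// function's arg_iterator, so both must visit the expansion in the same order.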
                                 const Twine &Name = "tmp") {

                                      llvm::StructType *SrcSTy,

  if (SrcSTy->getNumElements() == 0)
    return SrcPtr;

  uint64_t FirstEltSize =
  if (FirstEltSize < DstSize &&
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))

  if (Val->getType() == Ty)

  if (isa<llvm::PointerType>(Val->getType())) {
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))

  if (Val->getType() != DestIntTy) {
    if (DL.isBigEndian()) {
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");

      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
                                             DstSize.getFixedValue(), CGF);

  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {

  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedValue() >= DstSize.getFixedValue()) {

  if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
          ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
          FixedSrcTy->getElementType()->isIntegerTy(8)) {
        ScalableDstTy = llvm::ScalableVectorType::get(
            FixedSrcTy->getElementType(),
            ScalableDstTy->getElementCount().getKnownMinValue() / 8);
      if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
        auto *UndefVec = llvm::UndefValue::get(ScalableDstTy);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
            ScalableDstTy, UndefVec, Load, Zero, "cast.scalable");
        if (ScalableDstTy != Ty)

      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
                                       llvm::TypeSize DstSize,
                                       bool DstIsVolatile) {

  llvm::Type *SrcTy = Src->getType();

  if (llvm::StructType *DstSTy =
    assert(!SrcSize.isScalable());
                                 SrcSize.getFixedValue(), *this);

  if (SrcSize.isScalable() || SrcSize <= DstSize) {
    if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() &&
  } else if (llvm::StructType *STy =
                 dyn_cast<llvm::StructType>(Src->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *Elt = Builder.CreateExtractValue(Src, i);
  } else if (SrcTy->isIntegerTy()) {
    llvm::Type *DstIntTy = Builder.getIntNTy(DstSize.getFixedValue() * 8);
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned TotalIRArgs;

    unsigned PaddingArgIndex;

    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),

                         bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;

  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;

  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);

                 bool OnlyRequiredArgs);
void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;

    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;

    auto &IRArgs = ArgInfo[ArgNo];

      IRArgs.PaddingArgIndex = IRArgNo++;

        llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
          IRArgs.NumberOfArgs = STy->getNumElements();
          IRArgs.NumberOfArgs = 1;

      IRArgs.NumberOfArgs = 1;

      IRArgs.NumberOfArgs = 0;

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;

    if (IRArgNo == 1 && SwapThisWithSRet)

  assert(ArgNo == ArgInfo.size());

    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
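// ClangToLLVMArgMapping assigns each Clang-level argument a contiguous range
// [FirstArgIndex, FirstArgIndex + NumberOfArgs) of IR arguments: expanded and
// struct-coerced arguments take several slots, ignored/inalloca ones take
// zero, and sret gets a dedicated slot (swapped after `this` for ABIs where
// the instance pointer must come first).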
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());

  switch (BT->getKind()) {
  case BuiltinType::Float:
  case BuiltinType::Double:
  case BuiltinType::LongDouble:

    if (BT->getKind() == BuiltinType::LongDouble)
  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;

    llvm_unreachable("Invalid ABI kind for return argument");

    resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);

  if (IRFunctionArgs.hasSRetArg()) {
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =

  if (IRFunctionArgs.hasInallocaArg())
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] =

  for (; it != ie; ++it, ++ArgNo) {

    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

      assert(NumIRArgs == 0);

      assert(NumIRArgs == 1);
      ArgTypes[FirstIRArg] = llvm::PointerType::get(

      assert(NumIRArgs == 1);
      ArgTypes[FirstIRArg] = llvm::PointerType::get(

      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;

      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
        *ArgTypesIter++ = EltTy;
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);

      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
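// GetFunctionType is the point where a CGFunctionInfo becomes a concrete
// llvm::FunctionType: the mapping above dictates which ArgTypes slot each
// padding, sret, inalloca, coerced, or expanded argument occupies, and the
// asserts verify that every slot is filled exactly once.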
                                           llvm::AttrBuilder &FuncAttrs,

    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    FuncAttrs.addAttribute("aarch64_pstate_sm_enabled");
    FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");

    FuncAttrs.addAttribute("aarch64_preserves_za");
    FuncAttrs.addAttribute("aarch64_in_za");
    FuncAttrs.addAttribute("aarch64_out_za");
    FuncAttrs.addAttribute("aarch64_inout_za");

    FuncAttrs.addAttribute("aarch64_preserves_zt0");
    FuncAttrs.addAttribute("aarch64_in_zt0");
    FuncAttrs.addAttribute("aarch64_out_zt0");
    FuncAttrs.addAttribute("aarch64_inout_zt0");
                                          const Decl *Callee) {

  for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
    AA->getAssumption().split(Attrs, ",");

  FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
                         llvm::join(Attrs.begin(), Attrs.end(), ","));
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return ClassDecl->hasTrivialDestructor();
                              const Decl *TargetDecl) {

  if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))

  if (!Module.getLangOpts().CPlusPlus)

  if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
    if (FDecl->isExternC())
  } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
    if (VDecl->isExternC())

  return Module.getCodeGenOpts().StrictReturn ||
         !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
         Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
                                 llvm::DenormalMode FP32DenormalMode,
                                 llvm::AttrBuilder &FuncAttrs) {
  if (FPDenormalMode != llvm::DenormalMode::getDefault())
    FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());

  if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
    FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
                                 llvm::AttrBuilder &FuncAttrs) {

    StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
    llvm::AttrBuilder &FuncAttrs) {

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.IndirectTlsSegRefs)
    FuncAttrs.addAttribute("indirect-tls-seg-refs");
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);

    switch (CodeGenOpts.getFramePointer()) {
      FuncAttrs.addAttribute("frame-pointer",
                                 CodeGenOpts.getFramePointer()));

    if (CodeGenOpts.LessPreciseFPMAD)
      FuncAttrs.addAttribute("less-precise-fpmad", "true");

    if (CodeGenOpts.NullPointerIsValid)
      FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);

      FuncAttrs.addAttribute("no-trapping-math", "true");

    if (LangOpts.NoHonorInfs)
      FuncAttrs.addAttribute("no-infs-fp-math", "true");
    if (LangOpts.NoHonorNaNs)
      FuncAttrs.addAttribute("no-nans-fp-math", "true");
    if (LangOpts.ApproxFunc)
      FuncAttrs.addAttribute("approx-func-fp-math", "true");
    if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
        LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
        (LangOpts.getDefaultFPContractMode() ==
         LangOpts.getDefaultFPContractMode() ==
      FuncAttrs.addAttribute("unsafe-fp-math", "true");
    if (CodeGenOpts.SoftFloat)
      FuncAttrs.addAttribute("use-soft-float", "true");
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    if (LangOpts.NoSignedZero)
      FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");

    const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates",
                             llvm::join(Recips, ","));

      FuncAttrs.addAttribute("prefer-vector-width",

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");
    if (CodeGenOpts.EnableSegmentedStacks)
      FuncAttrs.addAttribute("split-stack");

    if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);

    switch (CodeGenOpts.getZeroCallUsedRegs()) {
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
      FuncAttrs.removeAttribute("zero-call-used-regs");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
      FuncAttrs.addAttribute("zero-call-used-regs", "used");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
      FuncAttrs.addAttribute("zero-call-used-regs", "all");

    FuncAttrs.addAttribute(llvm::Attribute::Convergent);

  if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
      LangOpts.SYCLIsDevice) {
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

  if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
    FuncAttrs.addAttribute("save-reg-params");

    StringRef Var, Value;
    FuncAttrs.addAttribute(Var, Value);
                                          const llvm::Function &F,

  auto FFeatures = F.getFnAttribute("target-features");

  llvm::StringSet<> MergedNames;
  MergedFeatures.reserve(TargetOpts.Features.size());

  auto AddUnmergedFeatures = [&](auto &&FeatureRange) {
    for (StringRef Feature : FeatureRange) {
      if (Feature.empty())
      assert(Feature[0] == '+' || Feature[0] == '-');
      StringRef Name = Feature.drop_front(1);
      bool Merged = !MergedNames.insert(Name).second;
        MergedFeatures.push_back(Feature);

  if (FFeatures.isValid())
    AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ','));
  AddUnmergedFeatures(TargetOpts.Features);

  if (!MergedFeatures.empty()) {
    llvm::sort(MergedFeatures);
    FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));
    bool WillInternalize) {

  llvm::AttrBuilder FuncAttrs(F.getContext());

  if (!TargetOpts.CPU.empty())
    FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
  if (!TargetOpts.TuneCPU.empty())
    FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);

                                CodeGenOpts, LangOpts,

  if (!WillInternalize && F.isInterposable()) {
    F.addFnAttrs(FuncAttrs);

  llvm::AttributeMask AttrsToRemove;

  llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
  llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
  llvm::DenormalMode Merged =

  if (DenormModeToMergeF32.isValid()) {

  if (Merged == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math");
  } else if (Merged != DenormModeToMerge) {
    FuncAttrs.addAttribute("denormal-fp-math",

  if (MergedF32 == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math-f32");
  } else if (MergedF32 != DenormModeToMergeF32) {
    FuncAttrs.addAttribute("denormal-fp-math-f32",

  F.removeFnAttrs(AttrsToRemove);

  F.addFnAttrs(FuncAttrs);
void CodeGenModule::getTrivialDefaultFunctionAttributes(
    StringRef Name, bool HasOptnone, bool AttrOnCallSite,
    llvm::AttrBuilder &FuncAttrs) {
  ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),

void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
                                                 bool HasOptnone,
                                                 bool AttrOnCallSite,
                                                 llvm::AttrBuilder &FuncAttrs) {
  getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,

  if (!AttrOnCallSite)

                                       llvm::AttrBuilder &attrs) {
  getDefaultFunctionAttributes("", false,
  GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
                                   const NoBuiltinAttr *NBA = nullptr) {
  auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
    AttributeName += "no-builtin-";
    AttributeName += BuiltinName;
    FuncAttrs.addAttribute(AttributeName);

  if (LangOpts.NoBuiltin) {
    FuncAttrs.addAttribute("no-builtins");

  if (llvm::is_contained(NBA->builtinNames(), "*")) {
    FuncAttrs.addAttribute("no-builtins");

  llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
                             const llvm::DataLayout &DL, const ABIArgInfo &AI,
                             bool CheckCoerce = true) {
  llvm::Type *Ty = Types.ConvertTypeForMem(QTy);

  if (!DL.typeSizeEqualsStoreSize(Ty))

    if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
                                  DL.getTypeSizeInBits(Ty)))

  if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
  if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))

                                unsigned NumRequiredArgs, unsigned ArgNo) {
  const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);

  if (ArgNo >= NumRequiredArgs)

  if (ArgNo < FD->getNumParams()) {
    const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
    if (Param && Param->hasAttr<MaybeUndefAttr>())

  if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))

  if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
           llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
             return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);

  llvm::FPClassTest Mask = llvm::fcNone;
  if (LangOpts.NoHonorInfs)
    Mask |= llvm::fcInf;
  if (LangOpts.NoHonorNaNs)
    Mask |= llvm::fcNan;
                                         llvm::AttributeList &Attrs) {
  if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
    Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory);
    llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
                                               llvm::AttributeList &AttrList,
                                               bool AttrOnCallSite,
                                               bool IsThunk) {

    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    FuncAttrs.addAttribute("cmse_nonsecure_call");

  bool HasOptnone = false;

  const NoBuiltinAttr *NBA = nullptr;

  auto AddPotentialArgAccess = [&]() {
    llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
      FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
                              llvm::MemoryEffects::argMemOnly());

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<ColdAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Cold);
    if (TargetDecl->hasAttr<HotAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Hot);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
            (Kind == OO_New || Kind == OO_Array_New))
          RetAttrs.addAttribute(llvm::Attribute::NoAlias);
      const bool IsVirtualCall = MD && MD->isVirtual();
      if (!(AttrOnCallSite && IsVirtualCall)) {
        if (Fn->isNoReturn())
          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
        NBA = Fn->getAttr<NoBuiltinAttr>();

    if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
      if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
        FuncAttrs.addAttribute(llvm::Attribute::NoMerge);

    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
      FuncAttrs.addAttribute("no_caller_saved_registers");
    if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
    if (TargetDecl->hasAttr<LeafAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCallback);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      std::optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),

    if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
        FuncAttrs.addAttribute("uniform-work-group-size", "true");
        FuncAttrs.addAttribute(
            "uniform-work-group-size",
            llvm::toStringRef(getLangOpts().OffloadUniformBlock));

    if (TargetDecl->hasAttr<CUDAGlobalAttr>() &&
      FuncAttrs.addAttribute("uniform-work-group-size", "true");

    if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>())
      FuncAttrs.addAttribute("aarch64_pstate_sm_body");

  getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);

    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<NoSplitStackAttr>())
      FuncAttrs.removeAttribute("split-stack");
    if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
          TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
      FuncAttrs.removeAttribute("zero-call-used-regs");
      FuncAttrs.addAttribute(
          "zero-call-used-regs",
          ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
  if (CodeGenOpts.NoPLT) {
    if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      if (!Fn->isDefined() && !AttrOnCallSite) {
        FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);

  if (TargetDecl->hasAttr<NoConvergentAttr>())
    FuncAttrs.removeAttribute(llvm::Attribute::Convergent);

  if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      if (!FD->isExternallyVisible())
        FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",

  if (!AttrOnCallSite) {
    if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
      FuncAttrs.addAttribute("cmse_nonsecure_entry");

    auto shouldDisableTailCalls = [&] {
      if (CodeGenOpts.DisableTailCalls)

        if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
            TargetDecl->hasAttr<AnyX86InterruptAttr>())

        if (CodeGenOpts.NoEscapingBlockTailCalls) {
          if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
            if (!BD->doesNotEscape())

    if (shouldDisableTailCalls())
      FuncAttrs.addAttribute("disable-tail-calls", "true");

    GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  if (CodeGenOpts.EnableNoundefAttrs &&
    RetAttrs.addAttribute(llvm::Attribute::NoUndef);

    RetAttrs.addAttribute(llvm::Attribute::SExt);
    RetAttrs.addAttribute(llvm::Attribute::ZExt);
    RetAttrs.addAttribute(llvm::Attribute::InReg);

    AddPotentialArgAccess();

    llvm_unreachable("Invalid ABI kind for return argument");

      RetAttrs.addDereferenceableAttr(
      if (getTypes().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        RetAttrs.addAttribute(llvm::Attribute::NonNull);
      llvm::Align Alignment =
      RetAttrs.addAlignmentAttr(Alignment);

  bool hasUsedSRet = false;

  if (IRFunctionArgs.hasSRetArg()) {
    SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
    SRETAttrs.addAttribute(llvm::Attribute::Writable);
    SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    ArgAttrs[IRFunctionArgs.getSRetArgNo()] =

  if (IRFunctionArgs.hasInallocaArg()) {
    ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =

    auto IRArgs = IRFunctionArgs.getIRArgs(0);

    assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");

    if (!CodeGenOpts.NullPointerIsValid &&
      Attrs.addAttribute(llvm::Attribute::NonNull);

      Attrs.addDereferenceableOrNullAttr(

    llvm::Align Alignment =
    Attrs.addAlignmentAttr(Alignment);

    ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
       I != E; ++I, ++ArgNo) {

    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::AttributeSet::get(
              llvm::AttrBuilder(getLLVMContext())
                  .addAttribute(llvm::Attribute::InReg));

    if (CodeGenOpts.EnableNoundefAttrs &&
      Attrs.addAttribute(llvm::Attribute::NoUndef);

        Attrs.addAttribute(llvm::Attribute::SExt);
        Attrs.addAttribute(llvm::Attribute::ZExt);

        Attrs.addAttribute(llvm::Attribute::Nest);
        Attrs.addAttribute(llvm::Attribute::InReg);
      Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));

        Attrs.addAttribute(llvm::Attribute::InReg);
        Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));

        if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
            Decl->getArgPassingRestrictions() ==
          Attrs.addAttribute(llvm::Attribute::NoAlias);

        AddPotentialArgAccess();

      Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));

      AddPotentialArgAccess();

        Attrs.addDereferenceableAttr(
        if (getTypes().getTargetAddressSpace(PTy) == 0 &&
            !CodeGenOpts.NullPointerIsValid)
          Attrs.addAttribute(llvm::Attribute::NonNull);
        llvm::Align Alignment =
        Attrs.addAlignmentAttr(Alignment);

    if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&
      llvm::Align Alignment =
      Attrs.addAlignmentAttr(Alignment);

      Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
        Attrs.addAttribute(llvm::Attribute::NoAlias);

      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        Attrs.addDereferenceableAttr(info.Width.getQuantity());
        Attrs.addAlignmentAttr(info.Align.getAsAlign());

      Attrs.addAttribute(llvm::Attribute::SwiftError);
      Attrs.addAttribute(llvm::Attribute::SwiftSelf);
      Attrs.addAttribute(llvm::Attribute::SwiftAsync);

      Attrs.addAttribute(llvm::Attribute::NoCapture);

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(

  AttrList = llvm::AttributeList::get(
                                        llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  if (value->getType() == varType)
    return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy()) &&
         "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
                          QualType ArgType, unsigned ArgNo) {

    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())

    if (NNAttr->isNonNull(ArgNo))
  if (FD->hasImplicitReturnZero()) {
    QualType RetTy = FD->getReturnType().getUnqualifiedType();
    llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);

  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());

  if (IRFunctionArgs.hasInallocaArg())
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),

  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);

  ArgVals.reserve(Args.size());

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {

        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

      assert(NumIRArgs == 0);

      assert(NumIRArgs == 1);

        ParamAddr = AlignedTemp;

      auto AI = Fn->getArg(FirstIRArg);

        assert(NumIRArgs == 1);

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
                                     PVD->getFunctionScopeIndex()) &&
            AI->addAttr(llvm::Attribute::NonNull);

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
              QualType ETy = ArrTy->getElementType();
              llvm::Align Alignment =
              AI->addAttrs(
                  llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
              uint64_t ArrSize = ArrTy->getZExtSize();
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity() *
                AI->addAttrs(Attrs);
            } else if (getContext().getTargetInfo().getNullPointerValue(
              AI->addAttr(llvm::Attribute::NonNull);

          } else if (const auto *ArrTy =
            QualType ETy = ArrTy->getElementType();
            llvm::Align Alignment =
            AI->addAttrs(
                llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
            if (!getTypes().getTargetAddressSpace(ETy) &&
              AI->addAttr(llvm::Attribute::NonNull);

          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
            AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
            llvm::ConstantInt *AlignmentCI =
                AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
            if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
              AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
              AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
                  llvm::Align(AlignmentInt)));

          AI->addAttr(llvm::Attribute::NoAlias);

        assert(NumIRArgs == 1);

        llvm::Value *V = AI;

              V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
        if (V->getType() != LTy)

      if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
        llvm::Value *Coerced = Fn->getArg(FirstIRArg);
        if (auto *VecTyFrom =
                dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
          if (VecTyFrom->getElementType()->isIntegerTy(1) &&
              VecTyFrom->getElementCount().isKnownMultipleOf(8) &&
              VecTyTo->getElementType() == Builder.getInt8Ty()) {
            VecTyFrom = llvm::ScalableVectorType::get(
                VecTyTo->getElementType(),
                VecTyFrom->getElementCount().getKnownMinValue() / 8);
            Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
          if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
            assert(NumIRArgs == 1);
            Coerced->setName(Arg->getName() + ".coerce");
                VecTyTo, Coerced, Zero, "cast.fixed")));

      llvm::StructType *STy =
          STy->getNumElements() > 1) {
        [[maybe_unused]] llvm::TypeSize StructSize =
        [[maybe_unused]] llvm::TypeSize PtrElementSize =
        if (STy->containsHomogeneousScalableVectorTypes()) {
          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with"
                 "homogeneous scalable vector type");

          STy->getNumElements() > 1) {
        llvm::TypeSize PtrElementSize =
        if (StructSize.isScalable()) {
          assert(STy->containsHomogeneousScalableVectorTypes() &&
                 "ABI only supports structure with homogeneous scalable vector "
          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with"
                 "homogeneous scalable vector type");
          assert(STy->getNumElements() == NumIRArgs);

          llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto *AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
                Builder.CreateInsertValue(LoadedStructValue, AI, i);

          uint64_t SrcSize = StructSize.getFixedValue();
          uint64_t DstSize = PtrElementSize.getFixedValue();

          if (SrcSize <= DstSize) {

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));

          if (SrcSize > DstSize) {

        assert(NumIRArgs == 1);
        auto AI = Fn->getArg(FirstIRArg);
        AI->setName(Arg->getName() + ".coerce");

            llvm::TypeSize::getFixed(
                getContext().getTypeSizeInChars(Ty).getQuantity() -

      unsigned argIndex = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        auto elt = Fn->getArg(argIndex++);
      assert(argIndex == FirstIRArg + NumIRArgs);

      auto FnArgIter = Fn->arg_begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = Fn->getArg(FirstIRArg + i);
        AI->setName(Arg->getName() + "." + Twine(i));
      assert(NumIRArgs == 0);

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)

    for (unsigned I = 0, E = Args.size(); I != E; ++I)
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast)
      return;

    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
                                                  llvm::Value *result) {
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty())
    return nullptr;
  if (&BB->back() != result)
    return nullptr;

  llvm::Type *resultType = result->getType();

  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    if (generator->getNextNode() != bitcast)

    InstsToKill.push_back(bitcast);

  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call)
    return nullptr;

  bool doRetainAutorelease;

    doRetainAutorelease = true;
  } else if (call->getCalledOperand() ==
    doRetainAutorelease = false;

    llvm::Instruction *prev = call->getPrevNode();
    if (isa<llvm::BitCastInst>(prev)) {
      prev = prev->getPrevNode();
    assert(isa<llvm::CallInst>(prev));
    assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
    InstsToKill.push_back(prev);

  result = call->getArgOperand(0);
  InstsToKill.push_back(call);

  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse())
      break;
    InstsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);

  for (auto *I : InstsToKill)
    I->eraseFromParent();

  if (doRetainAutorelease)

  return CGF.Builder.CreateBitCast(result, resultType);
                                              llvm::Value *result) {
      dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method)
    return nullptr;

  llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
  if (!retainCall || retainCall->getCalledOperand() !=

  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
      dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||

  llvm::Type *resultType = result->getType();
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();

  return CGF.Builder.CreateBitCast(load, resultType);
                                                      llvm::Value *result) {

  auto GetStoreIfValid = [&CGF,
                          ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != ReturnValuePtr ||

    assert(!SI->isAtomic() &&

  if (!ReturnValuePtr->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty())
      return nullptr;

    for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
      if (isa<llvm::BitCastInst>(&I))
      if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
        if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)

      return GetStoreIfValid(&I);

  llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
  if (!store)
    return nullptr;

  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
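// findDominatingStoreToReturnValue works backwards from the current insertion
// point, skipping bitcasts and lifetime_end markers, looking for the single
// store into the return slot; the final loop walks single predecessors to
// confirm that the store's block actually dominates the epilogue before its
// value is reused in place of a reload.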
                        int BitWidth, int CharWidth) {
  assert(CharWidth <= 64);
  assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);

  if (BitOffset >= CharWidth) {
    Pos += BitOffset / CharWidth;
    BitOffset = BitOffset % CharWidth;

  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;

  while (BitWidth >= CharWidth) {
    BitWidth -= CharWidth;

    Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
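// Worked example with CharWidth == 8, BitOffset == 3, BitWidth == 13 (i.e.
// bits [3, 16)): the first byte is ORed with 0xF8 ((Used << 3) & Used),
// leaving BitWidth == 8; the loop then marks the next byte in full, and no
// partial trailing byte remains.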
                        int StorageSize, int BitOffset, int BitWidth,
                        int CharWidth, bool BigEndian) {

  setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);

    std::reverse(TmpBits.begin(), TmpBits.end());

  for (uint64_t V : TmpBits)
    Bits[StorageOffset++] |= V;
                BFI.Size, CharWidth,

      auto Src = TmpBits.begin();
      auto Dst = Bits.begin() + Offset + I * Size;
      for (int J = 0; J < Size; ++J)

  std::fill_n(Bits.begin() + Offset, Size,
                                   int Pos, int Size, int CharWidth,

    for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
      Mask = (Mask << CharWidth) | *P;

    auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
      Mask = (Mask << CharWidth) | *--P;
                                                   llvm::IntegerType *ITy,
  assert(Src->getType() == ITy);
  assert(ITy->getScalarSizeInBits() <= 64);

  int Size = DataLayout.getTypeStoreSize(ITy);

  return Builder.CreateAnd(Src, Mask, "cmse.clear");

                                                   llvm::ArrayType *ATy,
  int Size = DataLayout.getTypeStoreSize(ATy);

      ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;

  llvm::Value *R = llvm::PoisonValue::get(ATy);
  for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
                                   DataLayout.isBigEndian());
    MaskIndex += CharsPerElt;
    llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
    llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
    R = Builder.CreateInsertValue(R, T1, I);
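// CMSE (Armv8-M Security Extensions) requires that a nonsecure-entry function
// not leak stale bits across the security boundary: buildMultiCharMask
// computes which bits of the returned record are actually used, and the
// "cmse.clear" AND zeroes every padding bit before the value is returned.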
  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;

    llvm::Function::arg_iterator EI = CurFn->arg_end();
    llvm::Value *ArgStruct = &*EI;
        cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();

    auto AI = CurFn->arg_begin();

      if (llvm::StoreInst *SI =
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
        RT = FD->getReturnType();
      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
        RT = MD->getReturnType();
        llvm_unreachable("Unexpected function/method type");

      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        results.push_back(elt);

      if (results.size() == 1) {

        RV = llvm::PoisonValue::get(returnType);
        for (unsigned i = 0, e = results.size(); i != e; ++i) {
          RV = Builder.CreateInsertValue(RV, results[i], i);

    llvm_unreachable("Invalid ABI kind for return argument");

  llvm::Instruction *Ret;
      auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());

    Ret->setDebugLoc(std::move(RetDbgLoc));
  ReturnsNonNullAttr *RetNNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))

  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())

    assert(!requiresReturnValueNullabilityCheck() &&
           "Cannot check nullability and the nonnull attribute");
    AttrLoc = RetNNAttr->getLocation();
    CheckKind = SanitizerKind::ReturnsNonnullAttribute;
    Handler = SanitizerHandler::NonnullReturn;

    if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
      if (auto *TSI = DD->getTypeSourceInfo())
        AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityReturn;
    Handler = SanitizerHandler::NullabilityReturn;

  SanitizerScope SanScope(this);

  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  if (requiresReturnValueNullabilityCheck())
    CanNullCheck =
        Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);

  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
  llvm::Value *DynamicData[] = {SLocPtr};
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
  llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
  llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
  if (type->isReferenceType()) {

      param->hasAttr<NSConsumedAttr>() &&
      type->isObjCRetainableType()) {

        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));

      CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
         "cleanup for callee-destructed param not recorded");

  llvm::Instruction *isActive = Builder.CreateUnreachable();

  return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
4132 "shouldn't have writeback for provably null argument");
4134 llvm::BasicBlock *contBB =
nullptr;
4140 if (!provablyNonNull) {
4145 CGF.
Builder.CreateCondBr(isNull, contBB, writebackBB);
4154 "icr.writeback-cast");
4163 if (writeback.
ToUse) {
4188 if (!provablyNonNull)
4203 for (
const auto &I : llvm::reverse(Cleanups)) {
4205 I.IsActiveIP->eraseFromParent();
4211 if (uop->getOpcode() == UO_AddrOf)
4212 return uop->getSubExpr();
  llvm::PointerType *destType =
  llvm::Type *destElemType =

  CodeGenFunction::ConditionalEvaluation condEval(CGF);

      llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  llvm::Value *finalArgument;

  if (provablyNonNull) {

    finalArgument = CGF.Builder.CreateSelect(
        isNull, llvm::ConstantPointerNull::get(destType),

    originBB = CGF.Builder.GetInsertBlock();
    CGF.Builder.CreateCondBr(isNull, contBB, copyBB);

    condEval.begin(CGF);

  llvm::Value *valueToUse = nullptr;

    src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");

  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();

    llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
    phiToUse->addIncoming(valueToUse, copyBB);
    phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
    valueToUse = phiToUse;
  StackBase = CGF.Builder.CreateStackSave("inalloca.save");

  CGF.Builder.CreateStackRestore(StackBase);
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||

  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;

  const NonNullAttr *NNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::NonnullAttribute))

  bool CanCheckNullability = false;
  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
      !PVD->getType()->isRecordType()) {
    auto Nullability = PVD->getType()->getNullability();
    CanCheckNullability = Nullability &&
                          PVD->getTypeSourceInfo();

  if (!NNAttr && !CanCheckNullability)

    AttrLoc = NNAttr->getLocation();
    CheckKind = SanitizerKind::NonnullAttribute;
    Handler = SanitizerHandler::NonnullArg;

    AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityArg;
    Handler = SanitizerHandler::NullabilityArg;

  SanitizerScope SanScope(this);
  llvm::Constant *StaticData[] = {
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
                                          AbstractCallee AC, unsigned ParmNum) {
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||

  return llvm::any_of(ArgTypes, [&](QualType Ty) {

    return classDecl->getTypeParamListAsWritten();

    return catDecl->getTypeParamList();
    llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
    AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {

  assert((ParamsToSkip == 0 || Prototype.P) &&
         "Can't skip parameters if type info is not provided");

  bool IsVariadic = false;

      ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
                      MD->param_type_end());

      ExplicitCC = FPT->getExtInfo().getCC();
      ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
                      FPT->param_type_end());

      assert(Arg != ArgRange.end() && "Running over edge of argument list!");
      assert((isGenericMethod || Ty->isVariablyModifiedType() ||
              Ty.getNonReferenceType()->isObjCRetainableType() ||
                  .getCanonicalType(Ty.getNonReferenceType())
                  getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
             "type mismatch in call argument!");

    assert((Arg == ArgRange.end() || IsVariadic) &&
           "Extra arguments in non-variadic function!");

  for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
    ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));

  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
    if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
    auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();

    assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
    llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
                                                     EmittedArg.getScalarVal(),

      std::swap(Args.back(), *(&Args.back() - 1));

    assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
           "inalloca only supported on x86");

  size_t CallArgsStart = Args.size();
  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    unsigned Idx = LeftToRight ? I : E - I - 1;

    unsigned InitialArgSize = Args.size();

    assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
            getContext().hasSameUnqualifiedType((*Arg)->getType(),
            (isa<ObjCMethodDecl>(AC.getDecl()) &&
           "Argument and parameter types don't match");

    assert(InitialArgSize + 1 == Args.size() &&
           "The code below depends on only adding one arg per EmitCallArg");
    (void)InitialArgSize;

    if (!Args.back().hasLValue()) {
      RValue RVArg = Args.back().getKnownRValue();
                          ParamsToSkip + Idx);
      MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);

    std::reverse(Args.begin() + CallArgsStart, Args.end());
      : Addr(Addr), Ty(Ty) {}

struct DisableDebugLocationUpdates {
  bool disabledDebugInfo;

    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))

  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)

  DisableDebugLocationUpdates Dis(*this, E);

          = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4673 "reference binding to unmaterialized r-value!");
4685 if (
type->isRecordType() &&
4692 bool DestroyedInCallee =
true, NeedsCleanup =
true;
4693 if (
const auto *RD =
type->getAsCXXRecordDecl())
4694 DestroyedInCallee = RD->hasNonTrivialDestructor();
4696 NeedsCleanup =
type.isDestructedType();
4698 if (DestroyedInCallee)
4705 if (DestroyedInCallee && NeedsCleanup) {
4712 llvm::Instruction *IsActive =
4719 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(
E) &&
4720 cast<CastExpr>(
E)->getCastKind() == CK_LValueToRValue &&
4721 !
type->isArrayParameterType()) {
QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  if (!getTarget().getTriple().isOSWindows())
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                                                  const llvm::Twine &name) {

                                                  const llvm::Twine &name) {
  for (auto arg : args)
    values.push_back(arg.emitRawPointer(*this));

                                          const llvm::Twine &name) {
  call->setDoesNotThrow();

                                         const llvm::Twine &name) {

  if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
    if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
      auto IID = CalleeFn->getIntrinsicID();
      if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))

                                                  const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(

    llvm::InvokeInst *invoke =
    invoke->setDoesNotReturn();

    llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
    call->setDoesNotReturn();

                                                  const Twine &name) {

                                          const Twine &name) {

                                       const Twine &Name) {

  llvm::CallBase *Inst;
    Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,

    AddObjCARCExceptionMetadata(Inst);
void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
  DeferredReplacements.push_back(
      std::make_pair(llvm::WeakTrackingVH(Old), New));

[[nodiscard]] llvm::AttributeList
maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
                                const llvm::AttributeList &Attrs,
                                llvm::Align NewAlign) {
  llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
  if (CurAlign >= NewAlign)

  llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
  return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
      .addRetAttribute(Ctx, AlignAttr);
template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {

  const AlignedAttrTy *AA = nullptr;
  llvm::Value *Alignment = nullptr;
  llvm::ConstantInt *OffsetCI = nullptr;

    AA = FuncDecl->getAttr<AlignedAttrTy>();

  [[nodiscard]] llvm::AttributeList
  TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
    if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))

    const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);

    if (!AlignmentCI->getValue().isPowerOf2())

    llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
            AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));

        AA->getLocation(), Alignment, OffsetCI);

class AssumeAlignedAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {

    Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
    if (Expr *Offset = AA->getOffset()) {
      if (OffsetCI->isNullValue())

class AllocAlignAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {

    Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
  if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
    return VT->getPrimitiveSizeInBits().getKnownMinValue();
  if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))

  unsigned MaxVectorWidth = 0;
  if (auto *ST = dyn_cast<llvm::StructType>(Ty))
    for (auto *I : ST->elements())

  return MaxVectorWidth;
                                 llvm::CallBase **callOrInvoke, bool IsMustTail,
                                 bool IsVirtualFunctionPointerThunk) {

  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {

    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        (TargetDecl->hasAttr<TargetAttr>() ||

        dyn_cast_or_null<FunctionDecl>(TargetDecl), CallArgs, RetTy);

  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    llvm::AllocaInst *AI;
      IP = IP->getNextNode();
    AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), "argmem",