32#include "llvm/ADT/StringExtras.h"
33#include "llvm/Analysis/ValueTracking.h"
34#include "llvm/IR/Assumptions.h"
35#include "llvm/IR/AttributeMask.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/CallingConv.h"
38#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/InlineAsm.h"
40#include "llvm/IR/IntrinsicInst.h"
41#include "llvm/IR/Intrinsics.h"
42#include "llvm/IR/Type.h"
43#include "llvm/Transforms/Utils/Local.h"
46using namespace CodeGen;
52 default:
return llvm::CallingConv::C;
57 case CC_Win64:
return llvm::CallingConv::Win64;
59 case CC_AAPCS:
return llvm::CallingConv::ARM_AAPCS;
60 case CC_AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
73 case CC_Swift:
return llvm::CallingConv::Swift;
75 case CC_M68kRTD:
return llvm::CallingConv::M68k_RTD;
129 unsigned totalArgs) {
131 assert(paramInfos.size() <= prefixArgs);
132 assert(proto->
getNumParams() + prefixArgs <= totalArgs);
134 paramInfos.reserve(totalArgs);
137 paramInfos.resize(prefixArgs);
141 paramInfos.push_back(ParamInfo);
143 if (ParamInfo.hasPassObjectSize())
144 paramInfos.emplace_back();
147 assert(paramInfos.size() <= totalArgs &&
148 "Did we forget to insert pass_object_size args?");
150 paramInfos.resize(totalArgs);
160 if (!FPT->hasExtParameterInfos()) {
161 assert(paramInfos.empty() &&
162 "We have paramInfos, but the prototype doesn't?");
163 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
167 unsigned PrefixSize = prefix.size();
171 prefix.reserve(prefix.size() + FPT->getNumParams());
173 auto ExtInfos = FPT->getExtParameterInfos();
174 assert(ExtInfos.size() == FPT->getNumParams());
175 for (
unsigned I = 0,
E = FPT->getNumParams(); I !=
E; ++I) {
176 prefix.push_back(FPT->getParamType(I));
177 if (ExtInfos[I].hasPassObjectSize())
200 FTP->getExtInfo(), paramInfos,
Required);
208 return ::arrangeLLVMFunctionInfo(*
this,
false, argTypes,
233 if (PcsAttr *PCS =
D->
getAttr<PcsAttr>())
236 if (
D->
hasAttr<AArch64VectorPcsAttr>())
239 if (
D->
hasAttr<AArch64SVEPcsAttr>())
242 if (
D->
hasAttr<AMDGPUKernelCallAttr>())
266 if (
D->
hasAttr<RISCVVectorCCAttr>())
287 return ::arrangeLLVMFunctionInfo(
288 *
this,
true, argTypes,
295 if (FD->
hasAttr<CUDAGlobalAttr>()) {
308 assert(!isa<CXXConstructorDecl>(MD) &&
"wrong method for constructors!");
309 assert(!isa<CXXDestructorDecl>(MD) &&
"wrong method for destructors!");
331 !
Target.getCXXABI().hasConstructorVariants();
336 auto *MD = cast<CXXMethodDecl>(GD.
getDecl());
344 bool PassParams =
true;
346 if (
auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
349 if (
auto Inherited = CD->getInheritedConstructor())
361 if (!paramInfos.empty()) {
364 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.
Prefix,
367 paramInfos.append(AddedArgs.
Suffix,
372 (PassParams && MD->isVariadic() ?
RequiredArgs(argTypes.size())
381 argTypes, extInfo, paramInfos, required);
387 for (
auto &arg : args)
395 for (
auto &arg : args)
402 unsigned prefixArgs,
unsigned totalArgs) {
422 unsigned ExtraPrefixArgs,
423 unsigned ExtraSuffixArgs,
424 bool PassProtoArgs) {
427 for (
const auto &Arg : args)
431 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
436 FPT, TotalPrefixArgs + ExtraSuffixArgs)
449 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
456 ArgTypes, Info, ParamInfos,
Required);
464 if (MD->isImplicitObjectMemberFunction())
469 assert(isa<FunctionType>(FTy));
476 std::nullopt, noProto->getExtInfo(), {},
511 I->hasAttr<NoEscapeAttr>());
512 extParamInfos.push_back(extParamInfo);
519 if (
getContext().getLangOpts().ObjCAutoRefCount &&
520 MD->
hasAttr<NSReturnsRetainedAttr>())
546 if (isa<CXXConstructorDecl>(GD.
getDecl()) ||
547 isa<CXXDestructorDecl>(GD.
getDecl()))
560 assert(MD->
isVirtual() &&
"only methods have thunks");
577 ArgTys.push_back(*FTP->param_type_begin());
579 ArgTys.push_back(Context.
IntTy);
594 unsigned numExtraRequiredArgs,
596 assert(args.size() >= numExtraRequiredArgs);
606 if (proto->isVariadic())
609 if (proto->hasExtParameterInfos())
619 cast<FunctionNoProtoType>(fnType))) {
625 for (
const auto &arg : args)
630 paramInfos, required);
642 chainCall ? 1 : 0, chainCall);
671 for (
const auto &Arg : args)
704 unsigned numPrefixArgs) {
705 assert(numPrefixArgs + 1 <= args.size() &&
706 "Emitting a call with less args than the required prefix?");
718 paramInfos, required);
730 assert(signature.
arg_size() <= args.size());
731 if (signature.
arg_size() == args.size())
736 if (!sigParamInfos.empty()) {
737 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
738 paramInfos.resize(args.size());
770 assert(llvm::all_of(argTypes,
774 llvm::FoldingSetNodeID ID;
779 bool isDelegateCall =
782 info, paramInfos, required, resultType, argTypes);
784 void *insertPos =
nullptr;
785 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
793 info, paramInfos, resultType, argTypes, required);
794 FunctionInfos.InsertNode(FI, insertPos);
796 bool inserted = FunctionsBeingProcessed.insert(FI).second;
798 assert(inserted &&
"Recursively being processed?");
801 if (CC == llvm::CallingConv::SPIR_KERNEL) {
819 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() ==
nullptr)
822 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
823 assert(erased &&
"Not in set?");
829 bool chainCall,
bool delegateCall,
835 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
840 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
841 argTypes.size() + 1, paramInfos.size()));
844 FI->CallingConvention = llvmCC;
845 FI->EffectiveCallingConvention = llvmCC;
846 FI->ASTCallingConvention = info.
getCC();
847 FI->InstanceMethod = instanceMethod;
848 FI->ChainCall = chainCall;
849 FI->DelegateCall = delegateCall;
855 FI->Required = required;
858 FI->ArgStruct =
nullptr;
859 FI->ArgStructAlign = 0;
860 FI->NumArgs = argTypes.size();
861 FI->HasExtParameterInfos = !paramInfos.empty();
862 FI->getArgsBuffer()[0].
type = resultType;
863 FI->MaxVectorWidth = 0;
864 for (
unsigned i = 0, e = argTypes.size(); i != e; ++i)
865 FI->getArgsBuffer()[i + 1].
type = argTypes[i];
866 for (
unsigned i = 0, e = paramInfos.size(); i != e; ++i)
867 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
877struct TypeExpansion {
878 enum TypeExpansionKind {
890 const TypeExpansionKind
Kind;
892 TypeExpansion(TypeExpansionKind K) :
Kind(K) {}
893 virtual ~TypeExpansion() {}
896struct ConstantArrayExpansion : TypeExpansion {
900 ConstantArrayExpansion(
QualType EltTy, uint64_t NumElts)
901 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
902 static bool classof(
const TypeExpansion *TE) {
903 return TE->Kind == TEK_ConstantArray;
907struct RecordExpansion : TypeExpansion {
914 : TypeExpansion(TEK_Record), Bases(
std::move(Bases)),
915 Fields(
std::move(Fields)) {}
916 static bool classof(
const TypeExpansion *TE) {
917 return TE->Kind == TEK_Record;
921struct ComplexExpansion : TypeExpansion {
925 static bool classof(
const TypeExpansion *TE) {
930struct NoExpansion : TypeExpansion {
931 NoExpansion() : TypeExpansion(TEK_None) {}
932 static bool classof(
const TypeExpansion *TE) {
933 return TE->Kind == TEK_None;
938static std::unique_ptr<TypeExpansion>
941 return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
949 "Cannot expand structure with flexible array.");
956 for (
const auto *FD : RD->
fields()) {
957 if (FD->isZeroLengthBitField(Context))
959 assert(!FD->isBitField() &&
960 "Cannot expand structure with bit-field members.");
962 if (UnionSize < FieldSize) {
963 UnionSize = FieldSize;
968 Fields.push_back(LargestFD);
970 if (
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
971 assert(!CXXRD->isDynamicClass() &&
972 "cannot expand vtable pointers in dynamic classes");
973 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
976 for (
const auto *FD : RD->
fields()) {
977 if (FD->isZeroLengthBitField(Context))
979 assert(!FD->isBitField() &&
980 "Cannot expand structure with bit-field members.");
981 Fields.push_back(FD);
984 return std::make_unique<RecordExpansion>(std::move(Bases),
988 return std::make_unique<ComplexExpansion>(CT->getElementType());
990 return std::make_unique<NoExpansion>();
995 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
998 if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1000 for (
auto BS : RExp->Bases)
1002 for (
auto FD : RExp->Fields)
1006 if (isa<ComplexExpansion>(Exp.get()))
1008 assert(isa<NoExpansion>(Exp.get()));
1016 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1017 for (
int i = 0, n = CAExp->NumElts; i < n; i++) {
1020 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1021 for (
auto BS : RExp->Bases)
1023 for (
auto FD : RExp->Fields)
1025 }
else if (
auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1030 assert(isa<NoExpansion>(Exp.get()));
1036 ConstantArrayExpansion *CAE,
1038 llvm::function_ref<
void(
Address)> Fn) {
1039 for (
int i = 0, n = CAE->NumElts; i < n; i++) {
1046 llvm::Function::arg_iterator &AI) {
1048 "Unexpected non-simple lvalue during struct expansion.");
1051 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1054 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1055 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1057 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1067 ExpandTypeFromArgs(BS->
getType(), SubLV, AI);
1069 for (
auto FD : RExp->Fields) {
1072 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1074 }
else if (isa<ComplexExpansion>(Exp.get())) {
1075 auto realValue = &*AI++;
1076 auto imagValue = &*AI++;
1081 assert(isa<NoExpansion>(Exp.get()));
1082 llvm::Value *Arg = &*AI++;
1089 if (Arg->getType()->isPointerTy()) {
1098void CodeGenFunction::ExpandTypeToArgs(
1102 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1106 *
this, CAExp, Addr, [&](
Address EltAddr) {
1110 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1113 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1124 ExpandTypeToArgs(BS->
getType(), BaseArg, IRFuncTy, IRCallArgs,
1129 for (
auto FD : RExp->Fields) {
1132 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1135 }
else if (isa<ComplexExpansion>(Exp.get())) {
1137 IRCallArgs[IRCallArgPos++] = CV.first;
1138 IRCallArgs[IRCallArgPos++] = CV.second;
1140 assert(isa<NoExpansion>(Exp.get()));
1142 assert(RV.isScalar() &&
1143 "Unexpected non-scalar rvalue during struct expansion.");
1146 llvm::Value *
V = RV.getScalarVal();
1147 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1148 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1149 V =
Builder.CreateBitCast(
V, IRFuncTy->getParamType(IRCallArgPos));
1151 IRCallArgs[IRCallArgPos++] =
V;
1159 const Twine &Name =
"tmp") {
1173 llvm::StructType *SrcSTy,
1176 if (SrcSTy->getNumElements() == 0)
return SrcPtr;
1184 uint64_t FirstEltSize =
1186 if (FirstEltSize < DstSize &&
1195 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1211 if (Val->getType() == Ty)
1214 if (isa<llvm::PointerType>(Val->getType())) {
1216 if (isa<llvm::PointerType>(Ty))
1217 return CGF.
Builder.CreateBitCast(Val, Ty,
"coerce.val");
1223 llvm::Type *DestIntTy = Ty;
1224 if (isa<llvm::PointerType>(DestIntTy))
1227 if (Val->getType() != DestIntTy) {
1229 if (DL.isBigEndian()) {
1232 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1233 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1235 if (SrcSize > DstSize) {
1236 Val = CGF.
Builder.CreateLShr(Val, SrcSize - DstSize,
"coerce.highbits");
1237 Val = CGF.
Builder.CreateTrunc(Val, DestIntTy,
"coerce.val.ii");
1239 Val = CGF.
Builder.CreateZExt(Val, DestIntTy,
"coerce.val.ii");
1240 Val = CGF.
Builder.CreateShl(Val, DstSize - SrcSize,
"coerce.highbits");
1244 Val = CGF.
Builder.CreateIntCast(Val, DestIntTy,
false,
"coerce.val.ii");
1248 if (isa<llvm::PointerType>(Ty))
1249 Val = CGF.
Builder.CreateIntToPtr(Val, Ty,
"coerce.val.ip");
1272 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1274 DstSize.getFixedValue(), CGF);
1282 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1283 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1289 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1290 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1304 if (
auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1305 if (
auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1308 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
1309 ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
1310 FixedSrcTy->getElementType()->isIntegerTy(8)) {
1311 ScalableDstTy = llvm::ScalableVectorType::get(
1312 FixedSrcTy->getElementType(),
1313 ScalableDstTy->getElementCount().getKnownMinValue() / 8);
1315 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
1317 auto *UndefVec = llvm::UndefValue::get(ScalableDstTy);
1318 auto *Zero = llvm::Constant::getNullValue(CGF.
CGM.
Int64Ty);
1320 ScalableDstTy, UndefVec, Load, Zero,
"cast.scalable");
1321 if (ScalableDstTy != Ty)
1334 llvm::ConstantInt::get(CGF.
IntPtrTy, SrcSize.getKnownMinValue()));
1339 llvm::TypeSize DstSize,
1340 bool DstIsVolatile) {
1344 llvm::Type *SrcTy = Src->getType();
1351 if (llvm::StructType *DstSTy =
1353 assert(!SrcSize.isScalable());
1355 SrcSize.getFixedValue(), *
this);
1359 if (SrcSize.isScalable() || SrcSize <= DstSize) {
1360 if (SrcTy->isIntegerTy() && Dst.
getElementType()->isPointerTy() &&
1365 }
else if (llvm::StructType *STy =
1366 dyn_cast<llvm::StructType>(Src->getType())) {
1369 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1371 llvm::Value *Elt =
Builder.CreateExtractValue(Src, i);
1377 }
else if (SrcTy->isIntegerTy()) {
1379 llvm::Type *DstIntTy =
Builder.getIntNTy(DstSize.getFixedValue() * 8);
1417class ClangToLLVMArgMapping {
1418 static const unsigned InvalidIndex = ~0
U;
1419 unsigned InallocaArgNo;
1421 unsigned TotalIRArgs;
1425 unsigned PaddingArgIndex;
1428 unsigned FirstArgIndex;
1429 unsigned NumberOfArgs;
1432 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1440 bool OnlyRequiredArgs =
false)
1441 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1442 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1443 construct(Context, FI, OnlyRequiredArgs);
1446 bool hasInallocaArg()
const {
return InallocaArgNo != InvalidIndex; }
1447 unsigned getInallocaArgNo()
const {
1448 assert(hasInallocaArg());
1449 return InallocaArgNo;
1452 bool hasSRetArg()
const {
return SRetArgNo != InvalidIndex; }
1453 unsigned getSRetArgNo()
const {
1454 assert(hasSRetArg());
1458 unsigned totalIRArgs()
const {
return TotalIRArgs; }
1460 bool hasPaddingArg(
unsigned ArgNo)
const {
1461 assert(ArgNo < ArgInfo.size());
1462 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1464 unsigned getPaddingArgNo(
unsigned ArgNo)
const {
1465 assert(hasPaddingArg(ArgNo));
1466 return ArgInfo[ArgNo].PaddingArgIndex;
1471 std::pair<unsigned, unsigned> getIRArgs(
unsigned ArgNo)
const {
1472 assert(ArgNo < ArgInfo.size());
1473 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1474 ArgInfo[ArgNo].NumberOfArgs);
1479 bool OnlyRequiredArgs);
1482void ClangToLLVMArgMapping::construct(
const ASTContext &Context,
1484 bool OnlyRequiredArgs) {
1485 unsigned IRArgNo = 0;
1486 bool SwapThisWithSRet =
false;
1491 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1502 auto &IRArgs = ArgInfo[ArgNo];
1505 IRArgs.PaddingArgIndex = IRArgNo++;
1511 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.
getCoerceToType());
1513 IRArgs.NumberOfArgs = STy->getNumElements();
1515 IRArgs.NumberOfArgs = 1;
1521 IRArgs.NumberOfArgs = 1;
1526 IRArgs.NumberOfArgs = 0;
1536 if (IRArgs.NumberOfArgs > 0) {
1537 IRArgs.FirstArgIndex = IRArgNo;
1538 IRArgNo += IRArgs.NumberOfArgs;
1543 if (IRArgNo == 1 && SwapThisWithSRet)
1546 assert(ArgNo == ArgInfo.size());
1549 InallocaArgNo = IRArgNo++;
1551 TotalIRArgs = IRArgNo;
1559 return RI.
isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1574 switch (BT->getKind()) {
1577 case BuiltinType::Float:
1579 case BuiltinType::Double:
1581 case BuiltinType::LongDouble:
1592 if (BT->getKind() == BuiltinType::LongDouble)
1608 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1610 assert(Inserted &&
"Recursively being processed?");
1612 llvm::Type *resultType =
nullptr;
1617 llvm_unreachable(
"Invalid ABI kind for return argument");
1629 resultType = llvm::PointerType::get(
getLLVMContext(), addressSpace);
1645 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI,
true);
1649 if (IRFunctionArgs.hasSRetArg()) {
1652 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1657 if (IRFunctionArgs.hasInallocaArg())
1658 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1665 for (; it != ie; ++it, ++ArgNo) {
1669 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1670 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1673 unsigned FirstIRArg, NumIRArgs;
1674 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1679 assert(NumIRArgs == 0);
1683 assert(NumIRArgs == 1);
1685 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1689 assert(NumIRArgs == 1);
1690 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1698 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1700 assert(NumIRArgs == st->getNumElements());
1701 for (
unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1702 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1704 assert(NumIRArgs == 1);
1705 ArgTypes[FirstIRArg] = argType;
1711 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1713 *ArgTypesIter++ = EltTy;
1715 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1720 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1722 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1727 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1728 assert(Erased &&
"Not in set?");
1730 return llvm::FunctionType::get(resultType, ArgTypes, FI.
isVariadic());
1744 llvm::AttrBuilder &FuncAttrs,
1751 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1755 FuncAttrs.addAttribute(
"aarch64_pstate_sm_enabled");
1757 FuncAttrs.addAttribute(
"aarch64_pstate_sm_compatible");
1761 FuncAttrs.addAttribute(
"aarch64_preserves_za");
1763 FuncAttrs.addAttribute(
"aarch64_in_za");
1765 FuncAttrs.addAttribute(
"aarch64_out_za");
1767 FuncAttrs.addAttribute(
"aarch64_inout_za");
1771 FuncAttrs.addAttribute(
"aarch64_preserves_zt0");
1773 FuncAttrs.addAttribute(
"aarch64_in_zt0");
1775 FuncAttrs.addAttribute(
"aarch64_out_zt0");
1777 FuncAttrs.addAttribute(
"aarch64_inout_zt0");
1781 const Decl *Callee) {
1787 for (
const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
1788 AA->getAssumption().split(Attrs,
",");
1791 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1792 llvm::join(Attrs.begin(), Attrs.end(),
","));
1801 if (
const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1802 return ClassDecl->hasTrivialDestructor();
1808 const Decl *TargetDecl) {
1814 if (
Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
1818 if (!
Module.getLangOpts().CPlusPlus)
1821 if (
const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
1822 if (FDecl->isExternC())
1824 }
else if (
const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
1826 if (VDecl->isExternC())
1834 return Module.getCodeGenOpts().StrictReturn ||
1835 !
Module.MayDropFunctionReturn(
Module.getContext(), RetTy) ||
1836 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
1843 llvm::DenormalMode FP32DenormalMode,
1844 llvm::AttrBuilder &FuncAttrs) {
1845 if (FPDenormalMode != llvm::DenormalMode::getDefault())
1846 FuncAttrs.addAttribute(
"denormal-fp-math", FPDenormalMode.str());
1848 if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
1849 FuncAttrs.addAttribute(
"denormal-fp-math-f32", FP32DenormalMode.str());
1857 llvm::AttrBuilder &FuncAttrs) {
1863 StringRef Name,
bool HasOptnone,
const CodeGenOptions &CodeGenOpts,
1865 llvm::AttrBuilder &FuncAttrs) {
1868 if (CodeGenOpts.OptimizeSize)
1869 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1870 if (CodeGenOpts.OptimizeSize == 2)
1871 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1874 if (CodeGenOpts.DisableRedZone)
1875 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1876 if (CodeGenOpts.IndirectTlsSegRefs)
1877 FuncAttrs.addAttribute(
"indirect-tls-seg-refs");
1878 if (CodeGenOpts.NoImplicitFloat)
1879 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1881 if (AttrOnCallSite) {
1886 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1888 FuncAttrs.addAttribute(
"trap-func-name", CodeGenOpts.
TrapFuncName);
1890 switch (CodeGenOpts.getFramePointer()) {
1897 FuncAttrs.addAttribute(
"frame-pointer",
1899 CodeGenOpts.getFramePointer()));
1902 if (CodeGenOpts.LessPreciseFPMAD)
1903 FuncAttrs.addAttribute(
"less-precise-fpmad",
"true");
1905 if (CodeGenOpts.NullPointerIsValid)
1906 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1909 FuncAttrs.addAttribute(
"no-trapping-math",
"true");
1913 if (LangOpts.NoHonorInfs)
1914 FuncAttrs.addAttribute(
"no-infs-fp-math",
"true");
1915 if (LangOpts.NoHonorNaNs)
1916 FuncAttrs.addAttribute(
"no-nans-fp-math",
"true");
1917 if (LangOpts.ApproxFunc)
1918 FuncAttrs.addAttribute(
"approx-func-fp-math",
"true");
1919 if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
1920 LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
1921 (LangOpts.getDefaultFPContractMode() ==
1923 LangOpts.getDefaultFPContractMode() ==
1925 FuncAttrs.addAttribute(
"unsafe-fp-math",
"true");
1926 if (CodeGenOpts.SoftFloat)
1927 FuncAttrs.addAttribute(
"use-soft-float",
"true");
1928 FuncAttrs.addAttribute(
"stack-protector-buffer-size",
1929 llvm::utostr(CodeGenOpts.SSPBufferSize));
1930 if (LangOpts.NoSignedZero)
1931 FuncAttrs.addAttribute(
"no-signed-zeros-fp-math",
"true");
1934 const std::vector<std::string> &Recips = CodeGenOpts.
Reciprocals;
1935 if (!Recips.empty())
1936 FuncAttrs.addAttribute(
"reciprocal-estimates",
1937 llvm::join(Recips,
","));
1941 FuncAttrs.addAttribute(
"prefer-vector-width",
1944 if (CodeGenOpts.StackRealignment)
1945 FuncAttrs.addAttribute(
"stackrealign");
1946 if (CodeGenOpts.Backchain)
1947 FuncAttrs.addAttribute(
"backchain");
1948 if (CodeGenOpts.EnableSegmentedStacks)
1949 FuncAttrs.addAttribute(
"split-stack");
1951 if (CodeGenOpts.SpeculativeLoadHardening)
1952 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1955 switch (CodeGenOpts.getZeroCallUsedRegs()) {
1956 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
1957 FuncAttrs.removeAttribute(
"zero-call-used-regs");
1959 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
1960 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr-arg");
1962 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
1963 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr");
1965 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
1966 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-arg");
1968 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
1969 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used");
1971 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
1972 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr-arg");
1974 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
1975 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr");
1977 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
1978 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-arg");
1980 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
1981 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all");
1992 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1997 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
1998 LangOpts.SYCLIsDevice) {
1999 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2002 if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
2003 FuncAttrs.addAttribute(
"save-reg-params");
2006 StringRef Var,
Value;
2008 FuncAttrs.addAttribute(Var,
Value);
2022 const llvm::Function &F,
2024 auto FFeatures = F.getFnAttribute(
"target-features");
2026 llvm::StringSet<> MergedNames;
2028 MergedFeatures.reserve(TargetOpts.
Features.size());
2030 auto AddUnmergedFeatures = [&](
auto &&FeatureRange) {
2031 for (StringRef Feature : FeatureRange) {
2032 if (Feature.empty())
2034 assert(Feature[0] ==
'+' || Feature[0] ==
'-');
2035 StringRef Name = Feature.drop_front(1);
2036 bool Merged = !MergedNames.insert(Name).second;
2038 MergedFeatures.push_back(Feature);
2042 if (FFeatures.isValid())
2043 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(),
','));
2044 AddUnmergedFeatures(TargetOpts.
Features);
2046 if (!MergedFeatures.empty()) {
2047 llvm::sort(MergedFeatures);
2048 FuncAttr.addAttribute(
"target-features", llvm::join(MergedFeatures,
","));
2055 bool WillInternalize) {
2057 llvm::AttrBuilder FuncAttrs(F.getContext());
2060 if (!TargetOpts.
CPU.empty())
2061 FuncAttrs.addAttribute(
"target-cpu", TargetOpts.
CPU);
2062 if (!TargetOpts.
TuneCPU.empty())
2063 FuncAttrs.addAttribute(
"tune-cpu", TargetOpts.
TuneCPU);
2066 CodeGenOpts, LangOpts,
2069 if (!WillInternalize && F.isInterposable()) {
2074 F.addFnAttrs(FuncAttrs);
2078 llvm::AttributeMask AttrsToRemove;
2080 llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
2081 llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
2082 llvm::DenormalMode Merged =
2086 if (DenormModeToMergeF32.isValid()) {
2091 if (Merged == llvm::DenormalMode::getDefault()) {
2092 AttrsToRemove.addAttribute(
"denormal-fp-math");
2093 }
else if (Merged != DenormModeToMerge) {
2095 FuncAttrs.addAttribute(
"denormal-fp-math",
2099 if (MergedF32 == llvm::DenormalMode::getDefault()) {
2100 AttrsToRemove.addAttribute(
"denormal-fp-math-f32");
2101 }
else if (MergedF32 != DenormModeToMergeF32) {
2103 FuncAttrs.addAttribute(
"denormal-fp-math-f32",
2107 F.removeFnAttrs(AttrsToRemove);
2112 F.addFnAttrs(FuncAttrs);
2115void CodeGenModule::getTrivialDefaultFunctionAttributes(
2116 StringRef Name,
bool HasOptnone,
bool AttrOnCallSite,
2117 llvm::AttrBuilder &FuncAttrs) {
2118 ::getTrivialDefaultFunctionAttributes(Name, HasOptnone,
getCodeGenOpts(),
2123void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2125 bool AttrOnCallSite,
2126 llvm::AttrBuilder &FuncAttrs) {
2127 getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
2131 if (!AttrOnCallSite)
2136 llvm::AttrBuilder &attrs) {
2137 getDefaultFunctionAttributes(
"",
false,
2139 GetCPUAndFeaturesAttributes(
GlobalDecl(), attrs);
2144 const NoBuiltinAttr *NBA =
nullptr) {
2145 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2147 AttributeName +=
"no-builtin-";
2148 AttributeName += BuiltinName;
2149 FuncAttrs.addAttribute(AttributeName);
2153 if (LangOpts.NoBuiltin) {
2155 FuncAttrs.addAttribute(
"no-builtins");
2169 if (llvm::is_contained(NBA->builtinNames(),
"*")) {
2170 FuncAttrs.addAttribute(
"no-builtins");
2175 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2179 const llvm::DataLayout &DL,
const ABIArgInfo &AI,
2180 bool CheckCoerce =
true) {
2181 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
2187 if (!DL.typeSizeEqualsStoreSize(Ty))
2194 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2195 DL.getTypeSizeInBits(Ty)))
2219 if (
const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2221 if (
const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2230 unsigned NumRequiredArgs,
unsigned ArgNo) {
2231 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2236 if (ArgNo >= NumRequiredArgs)
2240 if (ArgNo < FD->getNumParams()) {
2241 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2242 if (Param && Param->
hasAttr<MaybeUndefAttr>())
2259 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2262 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2264 llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
2265 return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
2274 llvm::FPClassTest Mask = llvm::fcNone;
2275 if (LangOpts.NoHonorInfs)
2276 Mask |= llvm::fcInf;
2277 if (LangOpts.NoHonorNaNs)
2278 Mask |= llvm::fcNan;
2284 llvm::AttributeList &Attrs) {
2285 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2286 Attrs = Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Memory);
2287 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
2313 llvm::AttributeList &AttrList,
2315 bool AttrOnCallSite,
bool IsThunk) {
2323 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2325 FuncAttrs.addAttribute(
"cmse_nonsecure_call");
2337 bool HasOptnone =
false;
2339 const NoBuiltinAttr *NBA =
nullptr;
2343 auto AddPotentialArgAccess = [&]() {
2344 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2346 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2347 llvm::MemoryEffects::argMemOnly());
2354 if (TargetDecl->
hasAttr<ReturnsTwiceAttr>())
2355 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2356 if (TargetDecl->
hasAttr<NoThrowAttr>())
2357 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2358 if (TargetDecl->
hasAttr<NoReturnAttr>())
2359 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2360 if (TargetDecl->
hasAttr<ColdAttr>())
2361 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2362 if (TargetDecl->
hasAttr<HotAttr>())
2363 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2364 if (TargetDecl->
hasAttr<NoDuplicateAttr>())
2365 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2366 if (TargetDecl->
hasAttr<ConvergentAttr>())
2367 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2369 if (
const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2372 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2374 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2376 (Kind == OO_New || Kind == OO_Array_New))
2377 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2380 const bool IsVirtualCall = MD && MD->
isVirtual();
2383 if (!(AttrOnCallSite && IsVirtualCall)) {
2384 if (Fn->isNoReturn())
2385 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2386 NBA = Fn->getAttr<NoBuiltinAttr>();
2390 if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
2393 if (AttrOnCallSite && TargetDecl->
hasAttr<NoMergeAttr>())
2394 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2398 if (TargetDecl->
hasAttr<ConstAttr>()) {
2399 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2400 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2403 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2404 }
else if (TargetDecl->
hasAttr<PureAttr>()) {
2405 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2406 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2408 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2409 }
else if (TargetDecl->
hasAttr<NoAliasAttr>()) {
2410 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2411 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2413 if (TargetDecl->
hasAttr<RestrictAttr>())
2414 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2415 if (TargetDecl->
hasAttr<ReturnsNonNullAttr>() &&
2416 !CodeGenOpts.NullPointerIsValid)
2417 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2418 if (TargetDecl->
hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2419 FuncAttrs.addAttribute(
"no_caller_saved_registers");
2420 if (TargetDecl->
hasAttr<AnyX86NoCfCheckAttr>())
2421 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2422 if (TargetDecl->
hasAttr<LeafAttr>())
2423 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2424 if (TargetDecl->
hasAttr<BPFFastCallAttr>())
2425 FuncAttrs.addAttribute(
"bpf_fastcall");
2427 HasOptnone = TargetDecl->
hasAttr<OptimizeNoneAttr>();
2428 if (
auto *AllocSize = TargetDecl->
getAttr<AllocSizeAttr>()) {
2429 std::optional<unsigned> NumElemsParam;
2430 if (AllocSize->getNumElemsParam().isValid())
2431 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2432 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2436 if (TargetDecl->
hasAttr<OpenCLKernelAttr>()) {
2439 FuncAttrs.addAttribute(
"uniform-work-group-size",
"true");
2446 FuncAttrs.addAttribute(
2447 "uniform-work-group-size",
2448 llvm::toStringRef(
getLangOpts().OffloadUniformBlock));
2452 if (TargetDecl->
hasAttr<CUDAGlobalAttr>() &&
2454 FuncAttrs.addAttribute(
"uniform-work-group-size",
"true");
2456 if (TargetDecl->
hasAttr<ArmLocallyStreamingAttr>())
2457 FuncAttrs.addAttribute(
"aarch64_pstate_sm_body");
2469 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2474 if (TargetDecl->
hasAttr<NoSpeculativeLoadHardeningAttr>())
2475 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2476 if (TargetDecl->
hasAttr<SpeculativeLoadHardeningAttr>())
2477 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2478 if (TargetDecl->
hasAttr<NoSplitStackAttr>())
2479 FuncAttrs.removeAttribute(
"split-stack");
2480 if (TargetDecl->
hasAttr<ZeroCallUsedRegsAttr>()) {
2483 TargetDecl->
getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2484 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2485 FuncAttrs.addAttribute(
2486 "zero-call-used-regs",
2487 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2494 if (CodeGenOpts.NoPLT) {
2495 if (
auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2496 if (!Fn->isDefined() && !AttrOnCallSite) {
2497 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2502 if (TargetDecl->
hasAttr<NoConvergentAttr>())
2503 FuncAttrs.removeAttribute(llvm::Attribute::Convergent);
2508 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2509 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2510 if (!FD->isExternallyVisible())
2511 FuncAttrs.addAttribute(
"sample-profile-suffix-elision-policy",
2518 if (!AttrOnCallSite) {
2519 if (TargetDecl && TargetDecl->
hasAttr<CmseNSEntryAttr>())
2520 FuncAttrs.addAttribute(
"cmse_nonsecure_entry");
2523 auto shouldDisableTailCalls = [&] {
2525 if (CodeGenOpts.DisableTailCalls)
2531 if (TargetDecl->
hasAttr<DisableTailCallsAttr>() ||
2532 TargetDecl->
hasAttr<AnyX86InterruptAttr>())
2535 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2536 if (
const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2537 if (!BD->doesNotEscape())
2543 if (shouldDisableTailCalls())
2544 FuncAttrs.addAttribute(
"disable-tail-calls",
"true");
2548 GetCPUAndFeaturesAttributes(CalleeInfo.
getCalleeDecl(), FuncAttrs);
2552 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI);
2559 if (CodeGenOpts.EnableNoundefAttrs &&
2563 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2569 RetAttrs.addAttribute(llvm::Attribute::SExt);
2571 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2575 RetAttrs.addAttribute(llvm::Attribute::InReg);
2587 AddPotentialArgAccess();
2596 llvm_unreachable(
"Invalid ABI kind for return argument");
2604 RetAttrs.addDereferenceableAttr(
2606 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2607 !CodeGenOpts.NullPointerIsValid)
2608 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2610 llvm::Align Alignment =
2612 RetAttrs.addAlignmentAttr(Alignment);
2617 bool hasUsedSRet =
false;
2621 if (IRFunctionArgs.hasSRetArg()) {
2623 SRETAttrs.addStructRetAttr(
getTypes().ConvertTypeForMem(RetTy));
2624 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2625 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2628 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2630 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2635 if (IRFunctionArgs.hasInallocaArg()) {
2638 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2647 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2649 assert(IRArgs.second == 1 &&
"Expected only a single `this` pointer.");
2656 if (!CodeGenOpts.NullPointerIsValid &&
2658 Attrs.addAttribute(llvm::Attribute::NonNull);
2665 Attrs.addDereferenceableOrNullAttr(
2671 llvm::Align Alignment =
2675 Attrs.addAlignmentAttr(Alignment);
2677 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(
getLLVMContext(), Attrs);
2683 I !=
E; ++I, ++ArgNo) {
2689 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2691 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2692 llvm::AttributeSet::get(
2694 llvm::AttrBuilder(
getLLVMContext()).addAttribute(llvm::Attribute::InReg));
2699 if (CodeGenOpts.EnableNoundefAttrs &&
2701 Attrs.addAttribute(llvm::Attribute::NoUndef);
2710 Attrs.addAttribute(llvm::Attribute::SExt);
2712 Attrs.addAttribute(llvm::Attribute::ZExt);
2716 Attrs.addAttribute(llvm::Attribute::Nest);
2718 Attrs.addAttribute(llvm::Attribute::InReg);
2719 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.
getDirectAlign()));
2726 Attrs.addAttribute(llvm::Attribute::InReg);
2729 Attrs.addByValAttr(
getTypes().ConvertTypeForMem(ParamType));
2732 if (CodeGenOpts.PassByValueIsNoAlias &&
Decl &&
2733 Decl->getArgPassingRestrictions() ==
2737 Attrs.addAttribute(llvm::Attribute::NoAlias);
2762 AddPotentialArgAccess();
2767 Attrs.addByRefAttr(
getTypes().ConvertTypeForMem(ParamType));
2778 AddPotentialArgAccess();
2785 Attrs.addDereferenceableAttr(
2787 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2788 !CodeGenOpts.NullPointerIsValid)
2789 Attrs.addAttribute(llvm::Attribute::NonNull);
2791 llvm::Align Alignment =
2793 Attrs.addAlignmentAttr(Alignment);
2801 if (TargetDecl && TargetDecl->
hasAttr<OpenCLKernelAttr>() &&
2805 llvm::Align Alignment =
2807 Attrs.addAlignmentAttr(Alignment);
2819 Attrs.addStructRetAttr(
getTypes().ConvertTypeForMem(ParamType));
2824 Attrs.addAttribute(llvm::Attribute::NoAlias);
2828 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2830 Attrs.addDereferenceableAttr(info.Width.getQuantity());
2831 Attrs.addAlignmentAttr(info.Align.getAsAlign());
2837 Attrs.addAttribute(llvm::Attribute::SwiftError);
2841 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2845 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
2850 Attrs.addAttribute(llvm::Attribute::NoCapture);
2852 if (Attrs.hasAttributes()) {
2853 unsigned FirstIRArg, NumIRArgs;
2854 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2855 for (
unsigned i = 0; i < NumIRArgs; i++)
2856 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
2862 AttrList = llvm::AttributeList::get(
2871 llvm::Value *value) {
2872 llvm::Type *varType = CGF.
ConvertType(var->getType());
2876 if (value->getType() == varType)
return value;
2878 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2879 &&
"unexpected promotion type");
2881 if (isa<llvm::IntegerType>(varType))
2882 return CGF.
Builder.CreateTrunc(value, varType,
"arg.unpromote");
2884 return CGF.
Builder.CreateFPCast(value, varType,
"arg.unpromote");
2890 QualType ArgType,
unsigned ArgNo) {
2902 if (
auto ParmNNAttr = PVD->
getAttr<NonNullAttr>())
2909 if (NNAttr->isNonNull(ArgNo))
2939 if (FD->hasImplicitReturnZero()) {
2940 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2942 llvm::Constant*
Zero = llvm::Constant::getNullValue(LLVMTy);
2951 assert(
Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2956 if (IRFunctionArgs.hasInallocaArg())
2957 ArgStruct =
Address(
Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2961 if (IRFunctionArgs.hasSRetArg()) {
2962 auto AI =
Fn->getArg(IRFunctionArgs.getSRetArgNo());
2963 AI->setName(
"agg.result");
2964 AI->addAttr(llvm::Attribute::NoAlias);
2971 ArgVals.reserve(Args.size());
2977 assert(FI.
arg_size() == Args.size() &&
2978 "Mismatch between function signature & arguments.");
2981 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2982 i != e; ++i, ++info_it, ++ArgNo) {
2987 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2995 unsigned FirstIRArg, NumIRArgs;
2996 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3000 assert(NumIRArgs == 0);
3013 assert(NumIRArgs == 1);
3037 ParamAddr = AlignedTemp;
3054 auto AI =
Fn->getArg(FirstIRArg);
3062 assert(NumIRArgs == 1);
3064 if (
const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
3067 PVD->getFunctionScopeIndex()) &&
3069 AI->addAttr(llvm::Attribute::NonNull);
3071 QualType OTy = PVD->getOriginalType();
3072 if (
const auto *ArrTy =
3079 QualType ETy = ArrTy->getElementType();
3080 llvm::Align Alignment =
3082 AI->addAttrs(llvm::AttrBuilder(
getLLVMContext()).addAlignmentAttr(Alignment));
3083 uint64_t ArrSize = ArrTy->getZExtSize();
3087 Attrs.addDereferenceableAttr(
3088 getContext().getTypeSizeInChars(ETy).getQuantity() *
3090 AI->addAttrs(Attrs);
3091 }
else if (
getContext().getTargetInfo().getNullPointerValue(
3094 AI->addAttr(llvm::Attribute::NonNull);
3097 }
else if (
const auto *ArrTy =
3103 QualType ETy = ArrTy->getElementType();
3104 llvm::Align Alignment =
3106 AI->addAttrs(llvm::AttrBuilder(
getLLVMContext()).addAlignmentAttr(Alignment));
3107 if (!
getTypes().getTargetAddressSpace(ETy) &&
3109 AI->addAttr(llvm::Attribute::NonNull);
3114 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3117 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3118 if (AVAttr && !
SanOpts.
has(SanitizerKind::Alignment)) {
3122 llvm::ConstantInt *AlignmentCI =
3125 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3126 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3127 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3128 AI->addAttrs(llvm::AttrBuilder(
getLLVMContext()).addAlignmentAttr(
3129 llvm::Align(AlignmentInt)));
3136 AI->addAttr(llvm::Attribute::NoAlias);
3144 assert(NumIRArgs == 1);
3148 llvm::Value *
V = AI;
3156 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
3179 if (
V->getType() != LTy)
3190 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(
ConvertType(Ty))) {
3191 llvm::Value *Coerced =
Fn->getArg(FirstIRArg);
3192 if (
auto *VecTyFrom =
3193 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
3196 if (VecTyFrom->getElementType()->isIntegerTy(1) &&
3197 VecTyFrom->getElementCount().isKnownMultipleOf(8) &&
3198 VecTyTo->getElementType() ==
Builder.getInt8Ty()) {
3199 VecTyFrom = llvm::ScalableVectorType::get(
3200 VecTyTo->getElementType(),
3201 VecTyFrom->getElementCount().getKnownMinValue() / 8);
3202 Coerced =
Builder.CreateBitCast(Coerced, VecTyFrom);
3204 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
3207 assert(NumIRArgs == 1);
3208 Coerced->setName(Arg->
getName() +
".coerce");
3210 VecTyTo, Coerced, Zero,
"cast.fixed")));
3216 llvm::StructType *STy =
3219 STy->getNumElements() > 1) {
3220 [[maybe_unused]] llvm::TypeSize StructSize =
3222 [[maybe_unused]] llvm::TypeSize PtrElementSize =
3224 if (STy->containsHomogeneousScalableVectorTypes()) {
3225 assert(StructSize == PtrElementSize &&
3226 "Only allow non-fractional movement of structure with"
3227 "homogeneous scalable vector type");
3243 STy->getNumElements() > 1) {
3245 llvm::TypeSize PtrElementSize =
3247 if (StructSize.isScalable()) {
3248 assert(STy->containsHomogeneousScalableVectorTypes() &&
3249 "ABI only supports structure with homogeneous scalable vector "
3251 assert(StructSize == PtrElementSize &&
3252 "Only allow non-fractional movement of structure with"
3253 "homogeneous scalable vector type");
3254 assert(STy->getNumElements() == NumIRArgs);
3256 llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
3257 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3258 auto *AI =
Fn->getArg(FirstIRArg + i);
3259 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3261 Builder.CreateInsertValue(LoadedStructValue, AI, i);
3266 uint64_t SrcSize = StructSize.getFixedValue();
3267 uint64_t DstSize = PtrElementSize.getFixedValue();
3270 if (SrcSize <= DstSize) {
3277 assert(STy->getNumElements() == NumIRArgs);
3278 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3279 auto AI =
Fn->getArg(FirstIRArg + i);
3280 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3285 if (SrcSize > DstSize) {
3291 assert(NumIRArgs == 1);
3292 auto AI =
Fn->getArg(FirstIRArg);
3293 AI->setName(Arg->
getName() +
".coerce");
3296 llvm::TypeSize::getFixed(
3297 getContext().getTypeSizeInChars(Ty).getQuantity() -
3323 unsigned argIndex = FirstIRArg;
3324 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3325 llvm::Type *eltType = coercionType->getElementType(i);
3330 auto elt =
Fn->getArg(argIndex++);
3333 assert(argIndex == FirstIRArg + NumIRArgs);
3345 auto FnArgIter =
Fn->arg_begin() + FirstIRArg;
3346 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3347 assert(FnArgIter ==
Fn->arg_begin() + FirstIRArg + NumIRArgs);
3348 for (
unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3349 auto AI =
Fn->getArg(FirstIRArg + i);
3350 AI->setName(Arg->
getName() +
"." + Twine(i));
3356 assert(NumIRArgs == 0);
3368 if (
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3369 for (
int I = Args.size() - 1; I >= 0; --I)
3372 for (
unsigned I = 0,
E = Args.size(); I !=
E; ++I)
3378 while (insn->use_empty()) {
3379 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3380 if (!bitcast)
return;
3383 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3384 bitcast->eraseFromParent();
3390 llvm::Value *result) {
3392 llvm::BasicBlock *BB = CGF.
Builder.GetInsertBlock();
3393 if (BB->empty())
return nullptr;
3394 if (&BB->back() != result)
return nullptr;
3396 llvm::Type *resultType = result->getType();
3399 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3405 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3408 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3411 if (generator->getNextNode() != bitcast)
3414 InstsToKill.push_back(bitcast);
3421 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3422 if (!call)
return nullptr;
3424 bool doRetainAutorelease;
3427 doRetainAutorelease =
true;
3428 }
else if (call->getCalledOperand() ==
3430 doRetainAutorelease =
false;
3438 llvm::Instruction *prev = call->getPrevNode();
3440 if (isa<llvm::BitCastInst>(prev)) {
3441 prev = prev->getPrevNode();
3444 assert(isa<llvm::CallInst>(prev));
3445 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3447 InstsToKill.push_back(prev);
3453 result = call->getArgOperand(0);
3454 InstsToKill.push_back(call);
3458 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3459 if (!bitcast->hasOneUse())
break;
3460 InstsToKill.push_back(bitcast);
3461 result = bitcast->getOperand(0);
3465 for (
auto *I : InstsToKill)
3466 I->eraseFromParent();
3469 if (doRetainAutorelease)
3473 return CGF.
Builder.CreateBitCast(result, resultType);
3478 llvm::Value *result) {
3481 dyn_cast_or_null<ObjCMethodDecl>(CGF.
CurCodeDecl);
3482 if (!method)
return nullptr;
3488 llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
3489 if (!retainCall || retainCall->getCalledOperand() !=
3494 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3495 llvm::LoadInst *load =
3496 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3497 if (!load || load->isAtomic() || load->isVolatile() ||
3504 llvm::Type *resultType = result->getType();
3506 assert(retainCall->use_empty());
3507 retainCall->eraseFromParent();
3510 return CGF.
Builder.CreateBitCast(load, resultType);
3517 llvm::Value *result) {
3540 auto GetStoreIfValid = [&CGF,
3541 ReturnValuePtr](llvm::User *
U) -> llvm::StoreInst * {
3542 auto *SI = dyn_cast<llvm::StoreInst>(
U);
3543 if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
3549 assert(!SI->isAtomic() &&
3557 if (!ReturnValuePtr->hasOneUse()) {
3558 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3559 if (IP->empty())
return nullptr;
3563 for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
3564 if (isa<llvm::BitCastInst>(&I))
3566 if (
auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
3567 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3570 return GetStoreIfValid(&I);
3575 llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
3576 if (!store)
return nullptr;
3580 llvm::BasicBlock *StoreBB = store->getParent();
3581 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3583 while (IP != StoreBB) {
3584 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3600 int BitWidth,
int CharWidth) {
3601 assert(CharWidth <= 64);
3602 assert(
static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3605 if (BitOffset >= CharWidth) {
3606 Pos += BitOffset / CharWidth;
3607 BitOffset = BitOffset % CharWidth;
3610 const uint64_t
Used = (uint64_t(1) << CharWidth) - 1;
3611 if (BitOffset + BitWidth >= CharWidth) {
3612 Bits[Pos++] |= (
Used << BitOffset) &
Used;
3613 BitWidth -= CharWidth - BitOffset;
3617 while (BitWidth >= CharWidth) {
3619 BitWidth -= CharWidth;
3623 Bits[Pos++] |= (
Used >> (CharWidth - BitWidth)) << BitOffset;
3631 int StorageSize,
int BitOffset,
int BitWidth,
3632 int CharWidth,
bool BigEndian) {
3635 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3638 std::reverse(TmpBits.begin(), TmpBits.end());
3640 for (uint64_t
V : TmpBits)
3641 Bits[StorageOffset++] |=
V;
3672 BFI.
Size, CharWidth,
3694 auto Src = TmpBits.begin();
3695 auto Dst = Bits.begin() + Offset + I * Size;
3696 for (
int J = 0; J < Size; ++J)
3716 std::fill_n(Bits.begin() + Offset, Size,
3721 int Pos,
int Size,
int CharWidth,
3726 for (
auto P = Bits.begin() + Pos,
E = Bits.begin() + Pos + Size;
P !=
E;
3728 Mask = (Mask << CharWidth) | *
P;
3730 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3732 Mask = (Mask << CharWidth) | *--
P;
3741 llvm::IntegerType *ITy,
3743 assert(Src->getType() == ITy);
3744 assert(ITy->getScalarSizeInBits() <= 64);
3747 int Size = DataLayout.getTypeStoreSize(ITy);
3755 return Builder.CreateAnd(Src, Mask,
"cmse.clear");
3761 llvm::ArrayType *ATy,
3764 int Size = DataLayout.getTypeStoreSize(ATy);
3771 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3773 llvm::Value *R = llvm::PoisonValue::get(ATy);
3774 for (
int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3776 DataLayout.isBigEndian());
3777 MaskIndex += CharsPerElt;
3778 llvm::Value *T0 =
Builder.CreateExtractValue(Src, I);
3779 llvm::Value *T1 =
Builder.CreateAnd(T0, Mask,
"cmse.clear");
3780 R =
Builder.CreateInsertValue(R, T1, I);
3807 llvm::DebugLoc RetDbgLoc;
3808 llvm::Value *RV =
nullptr;
3818 llvm::Function::arg_iterator EI =
CurFn->arg_end();
3820 llvm::Value *ArgStruct = &*EI;
3824 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3830 auto AI =
CurFn->arg_begin();
3870 if (llvm::StoreInst *SI =
3876 RetDbgLoc = SI->getDebugLoc();
3878 RV = SI->getValueOperand();
3879 SI->eraseFromParent();
3902 if (
auto *FD = dyn_cast<FunctionDecl>(
CurCodeDecl))
3903 RT = FD->getReturnType();
3904 else if (
auto *MD = dyn_cast<ObjCMethodDecl>(
CurCodeDecl))
3905 RT = MD->getReturnType();
3909 llvm_unreachable(
"Unexpected function/method type");
3929 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3936 results.push_back(elt);
3940 if (results.size() == 1) {
3948 RV = llvm::PoisonValue::get(returnType);
3949 for (
unsigned i = 0, e = results.size(); i != e; ++i) {
3950 RV =
Builder.CreateInsertValue(RV, results[i], i);
3957 llvm_unreachable(
"Invalid ABI kind for return argument");
3960 llvm::Instruction *
Ret;
3966 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3977 Ret->setDebugLoc(std::move(RetDbgLoc));
3990 ReturnsNonNullAttr *RetNNAttr =
nullptr;
3991 if (
SanOpts.
has(SanitizerKind::ReturnsNonnullAttribute))
3994 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
4002 assert(!requiresReturnValueNullabilityCheck() &&
4003 "Cannot check nullability and the nonnull attribute");
4004 AttrLoc = RetNNAttr->getLocation();
4005 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
4006 Handler = SanitizerHandler::NonnullReturn;
4008 if (
auto *DD = dyn_cast<DeclaratorDecl>(
CurCodeDecl))
4009 if (
auto *TSI = DD->getTypeSourceInfo())
4011 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
4012 CheckKind = SanitizerKind::NullabilityReturn;
4013 Handler = SanitizerHandler::NullabilityReturn;
4016 SanitizerScope SanScope(
this);
4023 llvm::Value *CanNullCheck =
Builder.CreateIsNotNull(SLocPtr);
4024 if (requiresReturnValueNullabilityCheck())
4026 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
4027 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
4031 llvm::Value *Cond =
Builder.CreateIsNotNull(RV);
4033 llvm::Value *DynamicData[] = {SLocPtr};
4034 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
4054 llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.
getLLVMContext());
4055 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
4082 if (
type->isReferenceType()) {
4091 param->
hasAttr<NSConsumedAttr>() &&
4092 type->isObjCRetainableType()) {
4095 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
4110 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
4112 "cleanup for callee-destructed param not recorded");
4114 llvm::Instruction *isActive =
Builder.CreateUnreachable();
4120 return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
4133 "shouldn't have writeback for provably null argument");
4135 llvm::BasicBlock *contBB =
nullptr;
4141 if (!provablyNonNull) {
4146 CGF.
Builder.CreateCondBr(isNull, contBB, writebackBB);
4155 "icr.writeback-cast");
4164 if (writeback.
ToUse) {
4189 if (!provablyNonNull)
4204 for (
const auto &I : llvm::reverse(Cleanups)) {
4206 I.IsActiveIP->eraseFromParent();
4212 if (uop->getOpcode() == UO_AddrOf)
4213 return uop->getSubExpr();
4243 llvm::PointerType *destType =
4245 llvm::Type *destElemType =
4262 CodeGenFunction::ConditionalEvaluation condEval(CGF);
4268 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
4272 llvm::BasicBlock *contBB =
nullptr;
4273 llvm::BasicBlock *originBB =
nullptr;
4276 llvm::Value *finalArgument;
4280 if (provablyNonNull) {
4285 finalArgument = CGF.
Builder.CreateSelect(
4286 isNull, llvm::ConstantPointerNull::get(destType),
4292 originBB = CGF.
Builder.GetInsertBlock();
4295 CGF.
Builder.CreateCondBr(isNull, contBB, copyBB);
4297 condEval.begin(CGF);
4301 llvm::Value *valueToUse =
nullptr;
4309 src = CGF.
Builder.CreateBitCast(src, destElemType,
"icr.cast");
4326 if (shouldCopy && !provablyNonNull) {
4327 llvm::BasicBlock *copyBB = CGF.
Builder.GetInsertBlock();
4332 llvm::PHINode *phiToUse = CGF.
Builder.CreatePHI(valueToUse->getType(), 2,
4334 phiToUse->addIncoming(valueToUse, copyBB);
4335 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
4337 valueToUse = phiToUse;
4351 StackBase = CGF.
Builder.CreateStackSave(
"inalloca.save");
4357 CGF.
Builder.CreateStackRestore(StackBase);
4365 if (!AC.getDecl() || !(
SanOpts.
has(SanitizerKind::NonnullAttribute) ||
4370 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) :
nullptr;
4371 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4374 const NonNullAttr *NNAttr =
nullptr;
4375 if (
SanOpts.
has(SanitizerKind::NonnullAttribute))
4378 bool CanCheckNullability =
false;
4379 if (
SanOpts.
has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
4380 !PVD->getType()->isRecordType()) {
4381 auto Nullability = PVD->getType()->getNullability();
4382 CanCheckNullability = Nullability &&
4384 PVD->getTypeSourceInfo();
4387 if (!NNAttr && !CanCheckNullability)
4394 AttrLoc = NNAttr->getLocation();
4395 CheckKind = SanitizerKind::NonnullAttribute;
4396 Handler = SanitizerHandler::NonnullArg;
4398 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4399 CheckKind = SanitizerKind::NullabilityArg;
4400 Handler = SanitizerHandler::NullabilityArg;
4403 SanitizerScope SanScope(
this);
4405 llvm::Constant *StaticData[] = {
4407 llvm::ConstantInt::get(
Int32Ty, ArgNo + 1),
4409 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
4414 AbstractCallee AC,
unsigned ParmNum) {
4415 if (!AC.getDecl() || !(
SanOpts.
has(SanitizerKind::NonnullAttribute) ||
4435 return llvm::any_of(ArgTypes, [&](
QualType Ty) {
4446 return classDecl->getTypeParamListAsWritten();
4450 return catDecl->getTypeParamList();
4460 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4461 AbstractCallee AC,
unsigned ParamsToSkip, EvaluationOrder Order) {
4464 assert((ParamsToSkip == 0 ||
Prototype.P) &&
4465 "Can't skip parameters if type info is not provided");
4475 bool IsVariadic =
false;
4482 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4483 MD->param_type_end());
4487 ExplicitCC = FPT->getExtInfo().getCC();
4488 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4489 FPT->param_type_end());
4497 assert(Arg != ArgRange.end() &&
"Running over edge of argument list!");
4499 (isGenericMethod || Ty->isVariablyModifiedType() ||
4500 Ty.getNonReferenceType()->isObjCRetainableType() ||
4502 .getCanonicalType(Ty.getNonReferenceType())
4504 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4505 "type mismatch in call argument!");
4511 assert((Arg == ArgRange.end() || IsVariadic) &&
4512 "Extra arguments in non-variadic function!");
4517 for (
auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4518 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4519 assert((
int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4531 auto MaybeEmitImplicitObjectSize = [&](
unsigned I,
const Expr *Arg,
4533 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4535 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4542 assert(EmittedArg.getScalarVal() &&
"We emitted nothing for the arg?");
4543 llvm::Value *
V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(),
T,
4544 EmittedArg.getScalarVal(),
4550 std::swap(Args.back(), *(&Args.back() - 1));
4555 assert(
getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4556 "inalloca only supported on x86");
4561 size_t CallArgsStart = Args.size();
4562 for (
unsigned I = 0,
E = ArgTypes.size(); I !=
E; ++I) {
4563 unsigned Idx = LeftToRight ? I :
E - I - 1;
4565 unsigned InitialArgSize = Args.size();
4568 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4569 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4571 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4573 "Argument and parameter types don't match");
4577 assert(InitialArgSize + 1 == Args.size() &&
4578 "The code below depends on only adding one arg per EmitCallArg");
4579 (void)InitialArgSize;
4582 if (!Args.back().hasLValue()) {
4583 RValue RVArg = Args.back().getKnownRValue();
4585 ParamsToSkip + Idx);
4589 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4596 std::reverse(Args.begin() + CallArgsStart, Args.end());
4604 : Addr(Addr), Ty(Ty) {}
4622struct DisableDebugLocationUpdates {
4624 bool disabledDebugInfo;
4626 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(
E) && CGF.
getDebugInfo()))
4629 ~DisableDebugLocationUpdates() {
4630 if (disabledDebugInfo)
4666 DisableDebugLocationUpdates Dis(*
this,
E);
4668 = dyn_cast<ObjCIndirectCopyRestoreExpr>(
E)) {
4674 "reference binding to unmaterialized r-value!");
4686 if (
type->isRecordType() &&
4693 bool DestroyedInCallee =
true, NeedsCleanup =
true;
4694 if (
const auto *RD =
type->getAsCXXRecordDecl())
4695 DestroyedInCallee = RD->hasNonTrivialDestructor();
4697 NeedsCleanup =
type.isDestructedType();
4699 if (DestroyedInCallee)
4706 if (DestroyedInCallee && NeedsCleanup) {
4713 llvm::Instruction *IsActive =
4720 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(
E) &&
4721 cast<CastExpr>(
E)->getCastKind() == CK_LValueToRValue &&
4722 !
type->isArrayParameterType()) {
4732QualType CodeGenFunction::getVarArgType(
const Expr *Arg) {
4736 if (!
getTarget().getTriple().isOSWindows())
4753CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4756 Inst->setMetadata(
"clang.arc.no_objc_arc_exceptions",
4763 const llvm::Twine &name) {
4771 const llvm::Twine &name) {
4773 for (
auto arg : args)
4774 values.push_back(
arg.emitRawPointer(*
this));
4781 const llvm::Twine &name) {
4783 call->setDoesNotThrow();
4790 const llvm::Twine &name) {
4805 if (
auto *CalleeFn = dyn_cast<llvm::Function>(
Callee->stripPointerCasts())) {
4806 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
4807 auto IID = CalleeFn->getIntrinsicID();
4808 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
4821 const llvm::Twine &name) {
4822 llvm::CallInst *call =
Builder.CreateCall(
4838 llvm::InvokeInst *invoke =
4844 invoke->setDoesNotReturn();
4847 llvm::CallInst *call =
Builder.CreateCall(callee, args, BundleList);
4848 call->setDoesNotReturn();
4857 const Twine &name) {
4865 const Twine &name) {
4875 const Twine &Name) {
4880 llvm::CallBase *Inst;
4882 Inst =
Builder.CreateCall(Callee, Args, BundleList, Name);
4885 Inst =
Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4893 AddObjCARCExceptionMetadata(Inst);
4898void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4900 DeferredReplacements.push_back(
4901 std::make_pair(llvm::WeakTrackingVH(Old), New));
4908[[nodiscard]] llvm::AttributeList
4909maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4910 const llvm::AttributeList &Attrs,
4911 llvm::Align NewAlign) {
4912 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4913 if (CurAlign >= NewAlign)
4915 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4916 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
4917 .addRetAttribute(Ctx, AlignAttr);
4920template <
typename AlignedAttrTy>
class AbstractAssumeAlignedAttrEmitter {
4925 const AlignedAttrTy *AA =
nullptr;
4927 llvm::Value *Alignment =
nullptr;
4928 llvm::ConstantInt *OffsetCI =
nullptr;
4934 AA = FuncDecl->
getAttr<AlignedAttrTy>();
4939 [[nodiscard]] llvm::AttributeList
4940 TryEmitAsCallSiteAttribute(
const llvm::AttributeList &Attrs) {
4941 if (!AA || OffsetCI || CGF.
SanOpts.
has(SanitizerKind::Alignment))
4943 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4948 if (!AlignmentCI->getValue().isPowerOf2())
4950 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4953 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4965 AA->getLocation(), Alignment, OffsetCI);
4971class AssumeAlignedAttrEmitter final
4972 :
public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4975 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4979 Alignment = cast<llvm::ConstantInt>(CGF.
EmitScalarExpr(AA->getAlignment()));
4980 if (
Expr *Offset = AA->getOffset()) {
4982 if (OffsetCI->isNullValue())
4989class AllocAlignAttrEmitter final
4990 :
public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4994 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4998 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
5007 if (
auto *VT = dyn_cast<llvm::VectorType>(Ty))
5008 return VT->getPrimitiveSizeInBits().getKnownMinValue();
5009 if (
auto *AT = dyn_cast<llvm::ArrayType>(Ty))
5012 unsigned MaxVectorWidth = 0;
5013 if (
auto *ST = dyn_cast<llvm::StructType>(Ty))
5014 for (
auto *I : ST->elements())
5016 return MaxVectorWidth;
5023 llvm::CallBase **callOrInvoke,
bool IsMustTail,
5025 bool IsVirtualFunctionPointerThunk) {
5037 const Decl *TargetDecl =
Callee.getAbstractInfo().getCalleeDecl().getDecl();
5038 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5045 if (TargetDecl->
hasAttr<AlwaysInlineAttr>() &&
5046 (TargetDecl->
hasAttr<TargetAttr>() ||
5055 dyn_cast_or_null<FunctionDecl>(TargetDecl), CallArgs, RetTy);
5062 if (llvm::StructType *ArgStruct = CallInfo.
getArgStruct()) {
5065 llvm::AllocaInst *AI;
5067 IP = IP->getNextNode();
5068 AI =
new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
"argmem",
5074 AI->setAlignment(Align.getAsAlign());
5075 AI->setUsedWithInAlloca(
true);
5076 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5077 ArgMemory =
RawAddress(AI, ArgStruct, Align);
5080 ClangToLLVMArgMapping IRFunctionArgs(
CGM.
getContext(), CallInfo);
5087 llvm::Value *UnusedReturnSizePtr =
nullptr;
5089 if (IsVirtualFunctionPointerThunk && RetAI.
isIndirect()) {
5091 IRFunctionArgs.getSRetArgNo(),
5098 llvm::TypeSize size =
5103 if (IRFunctionArgs.hasSRetArg()) {
5104 IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
5122 assert(CallInfo.
arg_size() == CallArgs.size() &&
5123 "Mismatch between function signature & arguments.");
5126 for (CallArgList::const_iterator I = CallArgs.begin(),
E = CallArgs.end();
5127 I !=
E; ++I, ++info_it, ++ArgNo) {
5131 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5132 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5135 unsigned FirstIRArg, NumIRArgs;
5136 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5138 bool ArgHasMaybeUndefAttr =
5143 assert(NumIRArgs == 0);
5144 assert(
getTarget().getTriple().getArch() == llvm::Triple::x86);
5145 if (I->isAggregate()) {
5147 ? I->getKnownLValue().getAddress()
5148 : I->getKnownRValue().getAggregateAddress();
5149 llvm::Instruction *Placeholder =
5154 CGBuilderTy::InsertPoint IP =
Builder.saveIP();
5155 Builder.SetInsertPoint(Placeholder);
5168 deferPlaceholderReplacement(Placeholder, Addr.
getPointer());
5173 I->Ty,
getContext().getTypeAlignInChars(I->Ty),
5174 "indirect-arg-temp");
5175 I->copyInto(*
this, Addr);
5184 I->copyInto(*
this, Addr);
5191 assert(NumIRArgs == 1);
5192 if (I->isAggregate()) {
5202 ? I->getKnownLValue().getAddress()
5203 : I->getKnownRValue().getAggregateAddress();
5207 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5208 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5209 TD->getAllocaAddrSpace()) &&
5210 "indirect argument must be in alloca address space");
5212 bool NeedCopy =
false;
5218 }
else if (I->hasLValue()) {
5219 auto LV = I->getKnownLValue();
5225 if (!isByValOrRef ||
5230 if ((isByValOrRef &&
5238 else if ((isByValOrRef &&
5239 Addr.
getType()->getAddressSpace() != IRFuncTy->
5248 auto *
T = llvm::PointerType::get(
5254 if (ArgHasMaybeUndefAttr)
5255 Val =
Builder.CreateFreeze(Val);
5256 IRCallArgs[FirstIRArg] = Val;
5266 if (ArgHasMaybeUndefAttr)
5267 Val =
Builder.CreateFreeze(Val);
5268 IRCallArgs[FirstIRArg] = Val;
5271 llvm::TypeSize ByvalTempElementSize =
5273 llvm::Value *LifetimeSize =
5278 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
5281 I->copyInto(*
this, AI);
5286 assert(NumIRArgs == 0);
5294 assert(NumIRArgs == 1);
5296 if (!I->isAggregate())
5297 V = I->getKnownRValue().getScalarVal();
5300 I->hasLValue() ? I->getKnownLValue().getAddress()
5301 : I->getKnownRValue().getAggregateAddress());
5307 assert(!swiftErrorTemp.
isValid() &&
"multiple swifterror args");
5311 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
5316 cast<llvm::AllocaInst>(
V)->setSwiftError(
true);
5324 V->getType()->isIntegerTy())
5329 if (FirstIRArg < IRFuncTy->getNumParams() &&
5330 V->getType() != IRFuncTy->getParamType(FirstIRArg))
5331 V =
Builder.CreateBitCast(
V, IRFuncTy->getParamType(FirstIRArg));
5333 if (ArgHasMaybeUndefAttr)
5335 IRCallArgs[FirstIRArg] =
V;
5339 llvm::StructType *STy =
5343 [[maybe_unused]] llvm::TypeSize SrcTypeSize =
5345 [[maybe_unused]] llvm::TypeSize DstTypeSize =
5347 if (STy->containsHomogeneousScalableVectorTypes()) {
5348 assert(SrcTypeSize == DstTypeSize &&
5349 "Only allow non-fractional movement of structure with "
5350 "homogeneous scalable vector type");
5352 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5359 if (!I->isAggregate()) {
5361 I->copyInto(*
this, Src);
5363 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5364 : I->getKnownRValue().getAggregateAddress();
5374 llvm::TypeSize SrcTypeSize =
5377 if (SrcTypeSize.isScalable()) {
5378 assert(STy->containsHomogeneousScalableVectorTypes() &&
5379 "ABI only supports structure with homogeneous scalable vector "
5381 assert(SrcTypeSize == DstTypeSize &&
5382 "Only allow non-fractional movement of structure with "
5383 "homogeneous scalable vector type");
5384 assert(NumIRArgs == STy->getNumElements());
5386 llvm::Value *StoredStructValue =
5388 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5389 llvm::Value *Extract =
Builder.CreateExtractValue(
5390 StoredStructValue, i, Src.
getName() +
".extract" + Twine(i));
5391 IRCallArgs[FirstIRArg + i] = Extract;
5394 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5395 uint64_t DstSize = DstTypeSize.getFixedValue();
5401 if (SrcSize < DstSize) {
5410 assert(NumIRArgs == STy->getNumElements());
5411 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5414 if (ArgHasMaybeUndefAttr)
5415 LI =
Builder.CreateFreeze(LI);
5416 IRCallArgs[FirstIRArg + i] = LI;
5421 assert(NumIRArgs == 1);
5429 auto *ATy = dyn_cast<llvm::ArrayType>(
Load->getType());
5430 if (ATy !=
nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
5434 if (ArgHasMaybeUndefAttr)
5436 IRCallArgs[FirstIRArg] =
Load;
5446 llvm::Value *tempSize =
nullptr;
5449 if (I->isAggregate()) {
5450 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5451 : I->getKnownRValue().getAggregateAddress();
5454 RValue RV = I->getKnownRValue();
5466 nullptr, &AllocaAddr);
5474 unsigned IRArgPos = FirstIRArg;
5475 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5476 llvm::Type *eltType = coercionType->getElementType(i);
5480 if (ArgHasMaybeUndefAttr)
5481 elt =
Builder.CreateFreeze(elt);
5482 IRCallArgs[IRArgPos++] = elt;
5484 assert(IRArgPos == FirstIRArg + NumIRArgs);
5494 unsigned IRArgPos = FirstIRArg;
5495 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5496 assert(IRArgPos == FirstIRArg + NumIRArgs);
5502 const CGCallee &ConcreteCallee =
Callee.prepareConcreteCallee(*
this);
5508 assert(IRFunctionArgs.hasInallocaArg());
5509 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5520 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5521 llvm::Value *Ptr) -> llvm::Function * {
5522 if (!CalleeFT->isVarArg())
5526 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5527 if (CE->getOpcode() == llvm::Instruction::BitCast)
5528 Ptr = CE->getOperand(0);
5531 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5535 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5539 if (OrigFT->isVarArg() ||
5540 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5541 OrigFT->getReturnType() != CalleeFT->getReturnType())
5544 for (
unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5545 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5551 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5553 IRFuncTy = OrigFn->getFunctionType();
5568 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5569 for (
unsigned i = 0; i < IRCallArgs.size(); ++i) {
5571 if (IRFunctionArgs.hasInallocaArg() &&
5572 i == IRFunctionArgs.getInallocaArgNo())
5574 if (i < IRFuncTy->getNumParams())
5575 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5580 for (
unsigned i = 0; i < IRCallArgs.size(); ++i)
5581 LargestVectorWidth = std::max(LargestVectorWidth,
5586 llvm::AttributeList Attrs;
5592 if (
CallingConv == llvm::CallingConv::X86_VectorCall &&
5593 getTarget().getTriple().isWindowsArm64EC()) {
5594 CGM.
Error(
Loc,
"__vectorcall calling convention is not currently "
5599 if (FD->hasAttr<StrictFPAttr>())
5601 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5606 if (FD->hasAttr<OptimizeNoneAttr>() &&
getLangOpts().FastMath)
5612 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoMerge);
5616 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5621 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5626 Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Convergent);
5635 !(TargetDecl && TargetDecl->
hasAttr<NoInlineAttr>())) {
5637 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5642 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5649 CannotThrow =
false;
5658 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5660 if (
auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5661 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5669 if (UnusedReturnSizePtr)
5671 UnusedReturnSizePtr);
5673 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr :
getInvokeDest();
5679 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5686 if (FD->hasAttr<StrictFPAttr>())
5688 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5690 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*
this, TargetDecl);
5691 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5693 AllocAlignAttrEmitter AllocAlignAttrEmitter(*
this, TargetDecl, CallArgs);
5694 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5699 CI =
Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5702 CI =
Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5706 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
5707 CI->getCalledFunction()->getName().starts_with(
"_Z4sqrt")) {
5716 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(
CurFuncDecl)) {
5717 if (
const auto *A = FD->getAttr<CFGuardAttr>()) {
5718 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5724 CI->setAttributes(Attrs);
5725 CI->setCallingConv(
static_cast<llvm::CallingConv::ID
>(
CallingConv));
5729 if (!CI->getType()->isVoidTy())
5730 CI->setName(
"call");
5736 LargestVectorWidth =
5742 if (!CI->getCalledFunction())
5749 AddObjCARCExceptionMetadata(CI);
5752 if (llvm::CallInst *
Call = dyn_cast<llvm::CallInst>(CI)) {
5753 if (TargetDecl && TargetDecl->
hasAttr<NotTailCalledAttr>())
5754 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5755 else if (IsMustTail) {
5762 else if (
Call->isIndirectCall())
5764 else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
5765 if (!cast<FunctionDecl>(TargetDecl)->isDefined())
5770 {cast<FunctionDecl>(TargetDecl),
Loc});
5774 if (llvm::GlobalValue::isWeakForLinker(
Linkage) ||
5775 llvm::GlobalValue::isDiscardableIfUnused(
Linkage))
5782 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5788 TargetDecl->
hasAttr<MSAllocatorAttr>())
5792 if (TargetDecl && TargetDecl->
hasAttr<ErrorAttr>()) {
5793 llvm::ConstantInt *
Line =
5795 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(
Line);
5797 CI->setMetadata(
"srcloc", MDT);
5805 if (CI->doesNotReturn()) {
5806 if (UnusedReturnSizePtr)
5810 if (
SanOpts.
has(SanitizerKind::Unreachable)) {
5813 if (
auto *F = CI->getCalledFunction())
5814 F->removeFnAttr(llvm::Attribute::NoReturn);
5815 CI->removeFnAttr(llvm::Attribute::NoReturn);
5820 SanitizerKind::KernelAddress)) {
5821 SanitizerScope SanScope(
this);
5822 llvm::IRBuilder<>::InsertPointGuard IPGuard(
Builder);
5824 auto *FnType = llvm::FunctionType::get(
CGM.
VoidTy,
false);
5825 llvm::FunctionCallee
Fn =
5832 Builder.ClearInsertionPoint();
5852 if (CI->getType()->isVoidTy())
5856 Builder.ClearInsertionPoint();
5862 if (swiftErrorTemp.
isValid()) {
5881 if (IsVirtualFunctionPointerThunk) {
5892 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5894 unsigned unpaddedIndex = 0;
5895 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5896 llvm::Type *eltType = coercionType->getElementType(i);
5900 llvm::Value *elt = CI;
5901 if (requiresExtract)
5902 elt =
Builder.CreateExtractValue(elt, unpaddedIndex++);
5904 assert(unpaddedIndex == 0);
5913 if (UnusedReturnSizePtr)
5930 llvm::Value *Real =
Builder.CreateExtractValue(CI, 0);
5931 llvm::Value *Imag =
Builder.CreateExtractValue(CI, 1);
5939 llvm::Value *
V = CI;
5940 if (
V->getType() != RetIRTy)
5950 if (
auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
5951 llvm::Value *
V = CI;
5952 if (
auto *ScalableSrcTy =
5953 dyn_cast<llvm::ScalableVectorType>(
V->getType())) {
5954 if (FixedDstTy->getElementType() ==
5955 ScalableSrcTy->getElementType()) {
5957 V =
Builder.CreateExtractVector(FixedDstTy,
V, Zero,
5971 DestIsVolatile =
false;
5992 llvm_unreachable(
"Invalid ABI kind for return argument");
5995 llvm_unreachable(
"Unhandled ABIArgInfo::Kind");
6000 if (
Ret.isScalar() && TargetDecl) {
6001 AssumeAlignedAttrEmitter.EmitAsAnAssumption(
Loc, RetTy, Ret);
6002 AllocAlignAttrEmitter.EmitAsAnAssumption(
Loc, RetTy, Ret);
6007 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
6008 LifetimeEnd.Emit(*
this, {});
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA=nullptr)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from \TargetOpts and \F, and sets the result in \FuncAttr.
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsWindows)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, llvm::DenormalMode FP32DenormalMode, llvm::AttrBuilder &FuncAttrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
enum clang::sema::@1656::IndirectLocalPathEntry::EntryKind Kind
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
llvm::MachO::Target Target
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static QualType getParamType(Sema &SemaRef, ArrayRef< ResultCandidate > Candidates, unsigned N)
Get the type of the Nth parameter from a given set of overload candidates.
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod, bool IsBuiltin=false) const
Retrieves the default calling convention for the current target.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const
Return number of constant array elements.
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
const FunctionProtoType * getFunctionType() const
getFunctionType - Return the underlying function type for this block.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const LLVM_READONLY
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
virtual RValue EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const
Emit the target dependent code to load a value of.
virtual RValue EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const =0
EmitVAArg - Emit the target dependent code to load a value of.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StringRef getName() const
Return the IR name of the pointer value.
llvm::PointerType * getType() const
Return the type of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
const BlockExpr * BlockExpression
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store of Src to the destination address Dst, coercing the value as needed.
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
SanitizerSet SanOpts
Sanitizers enabled for this function.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
const CodeGen::CGBlockInfo * BlockInfo
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void callCStructDestructor(LValue Dst)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
const TargetInfo & getTarget() const
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::BasicBlock * getInvokeDest()
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
CGDebugInfo * getDebugInfo()
Address EmitVAListRef(const Expr *E)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Type * ConvertType(QualType T)
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
CodeGenTypes & getTypes() const
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const CallExpr * MustTailCall
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
CallType * addControlledConvergenceToken(CallType *Input)
This class organizes the cross-function state that is used while generating LLVM code.
llvm::MDNode * getNoObjCARCExceptionsMetadata()
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
const ABIInfo & getABIInfo()
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
void addUndefinedGlobalForTailCall(std::pair< const FunctionDecl *, SourceLocation > Global)
ObjCEntrypoints & getObjCEntrypoints() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
bool shouldEmitConvergenceTokens() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD)
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
void valueProfile(CGBuilderTy &Builder, uint32_t ValueKind, llvm::Instruction *ValueSite, llvm::Value *ValuePtr)
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
unsigned getTargetAddressSpace(QualType T) const
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
A cleanup scope which generates the cleanup blocks lazily.
EHScopeStack::Cleanup * getCleanup()
Information for lazily generating a cleanup.
virtual bool isRedundantBeforeReturn()
A saved depth on the scope stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator end() const
Returns an iterator pointing to the outermost EH scope.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
bool isVolatileQualified() const
LangAS getAddressSpace() const
CharUnits getAlignment() const
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Qualifiers::ObjCLifetime getObjCLifetime() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
bool isVolatileQualified() const
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args, QualType ReturnType) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isZeroLengthBitField(const ASTContext &Ctx) const
Is this a zero-length bit-field? Such bit-fields aren't really bit-fields at all and instead act as a...
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isVariadic() const
Whether this function prototype is variadic.
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
const Decl * getDecl() const
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
bool isParamDestroyedInCallee() const
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getDecl() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change,...
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool useObjCFPRetForRealType(FloatModeKind T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
The base class of the type hierarchy.
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
bool isBlockPointerType() const
bool isIncompleteArrayType() const
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
QualType getCanonicalTypeInternal() const
bool isMemberPointerType() const
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
bool isAnyPointerType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool isNullPtrType() const
bool isObjCRetainableType() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
const Expr * getSubExpr() const
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Ret(InterpState &S, CodePtr &PC, APValue &Result)
bool This(InterpState &S, CodePtr OpPC)
bool Zero(InterpState &S, CodePtr OpPC)
bool Load(InterpState &S, CodePtr OpPC)
The JSON file list parser is used to communicate input to InstallAPI.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ Result
The result type of a method or function.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_Complete
Complete object dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
const FunctionProtoType * T
CallingConv
CallingConv - Specifies the calling convention that a function uses.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::BasicBlock * getBlock() const
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
LangAS getASTAllocaAddressSpace() const
bool isMSVCXXPersonality() const
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Iterator for iterating over Stmt * arrays that contain only T *.