#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace CodeGen;
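// Map the AST-level calling convention onto the corresponding LLVM IR
// calling convention.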
  default:
    return llvm::CallingConv::C;
  case CC_Win64:
    return llvm::CallingConv::Win64;
  case CC_AAPCS:
    return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_Swift:
    return llvm::CallingConv::Swift;
  case CC_M68kRTD:
    return llvm::CallingConv::M68k_RTD;
                                      unsigned totalArgs) {
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  paramInfos.resize(prefixArgs);

    paramInfos.push_back(ParamInfo);

    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  paramInfos.resize(totalArgs);
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());

  unsigned PrefixSize = prefix.size();

  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
                                     FTP->getExtInfo(), paramInfos, Required);

  return ::arrangeLLVMFunctionInfo(*this, false, argTypes,
  if (PcsAttr *PCS = D->getAttr<PcsAttr>())

  if (D->hasAttr<AArch64VectorPcsAttr>())

  if (D->hasAttr<AArch64SVEPcsAttr>())

  if (D->hasAttr<AMDGPUKernelCallAttr>())

  if (D->hasAttr<RISCVVectorCCAttr>())
  return ::arrangeLLVMFunctionInfo(*this, true, argTypes,

  if (FD->hasAttr<CUDAGlobalAttr>()) {

  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

      !Target.getCXXABI().hasConstructorVariants();

  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {

    if (auto Inherited = CD->getInheritedConstructor())

  if (!paramInfos.empty()) {

    paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,

    paramInfos.append(AddedArgs.Suffix,

      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())

                                 argTypes, extInfo, paramInfos, required);
  for (auto &arg : args)

  for (auto &arg : args)

                                            unsigned prefixArgs,
                                            unsigned totalArgs) {

                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {

  for (const auto &Arg : args)

  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

          FPT, TotalPrefixArgs + ExtraSuffixArgs)

  if (PassProtoArgs && FPT->hasExtParameterInfos()) {

                                 ArgTypes, Info, ParamInfos, Required);
  if (MD->isImplicitObjectMemberFunction())

  assert(isa<FunctionType>(FTy));

      std::nullopt, noProto->getExtInfo(), {},

                                        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))

  assert(MD->isVirtual() && "only methods have thunks");

  ArgTys.push_back(*FTP->param_type_begin());

    ArgTys.push_back(Context.IntTy);
                                          unsigned numExtraRequiredArgs,

  assert(args.size() >= numExtraRequiredArgs);

    if (proto->isVariadic())

    if (proto->hasExtParameterInfos())

                 cast<FunctionNoProtoType>(fnType))) {

  for (const auto &arg : args)

                                     paramInfos, required);

                            chainCall ? 1 : 0, chainCall);

  for (const auto &Arg : args)

                                              unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");

                                     paramInfos, required);
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())

  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  assert(llvm::all_of(argTypes,

  llvm::FoldingSetNodeID ID;

  bool isDelegateCall =

                          info, paramInfos, required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);

                              info, paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;

  assert(inserted && "Recursively being processed?");

  if (CC == llvm::CallingConv::SPIR_KERNEL) {

    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");
                                       bool chainCall, bool delegateCall,

  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->DelegateCall = delegateCall;

  FI->Required = required;

  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  FI->MaxVectorWidth = 0;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
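// The TypeExpansion hierarchy below describes how an argument with the
// "Expand" ABI kind is split into multiple IR-level values: per-element for
// constant arrays, per-base/per-field for records, real/imaginary for
// _Complex, or not at all.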
struct TypeExpansion {
  enum TypeExpansionKind {

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}

struct ConstantArrayExpansion : TypeExpansion {

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;

struct RecordExpansion : TypeExpansion {

      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;

struct ComplexExpansion : TypeExpansion {

  static bool classof(const TypeExpansion *TE) {

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
static std::unique_ptr<TypeExpansion>

    return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),

           "Cannot expand structure with flexible array.");

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))

        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");

        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;

        Fields.push_back(LargestFD);

      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))

        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);

    return std::make_unique<RecordExpansion>(std::move(Bases),

    return std::make_unique<ComplexExpansion>(CT->getElementType());

  return std::make_unique<NoExpansion>();
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

    for (auto BS : RExp->Bases)

    for (auto FD : RExp->Fields)

  if (isa<ComplexExpansion>(Exp.get()))

  assert(isa<NoExpansion>(Exp.get()));

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)

    for (auto FD : RExp->Fields)

  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {

    assert(isa<NoExpansion>(Exp.get()));
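// ExpandTypeFromArgs walks the expansion recursively and reassembles the
// parameter's value into the given lvalue from consecutive IR function
// arguments.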
                                   ConstantArrayExpansion *CAE,

                                   llvm::function_ref<void(Address)> Fn) {
  for (int i = 0, n = CAE->NumElts; i < n; i++) {

                                        llvm::Function::arg_iterator &AI) {

         "Unexpected non-simple lvalue during struct expansion.");

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

      ExpandTypeFromArgs(BS->getType(), SubLV, AI);

    for (auto FD : RExp->Fields) {

      ExpandTypeFromArgs(FD->getType(), SubLV, AI);

  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;

    assert(isa<NoExpansion>(Exp.get()));
    llvm::Value *Arg = &*AI++;

    if (Arg->getType()->isPointerTy()) {
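// ExpandTypeToArgs is the inverse: it flattens an argument value into the
// individual IR call operands expected by the callee's expanded signature.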
void CodeGenFunction::ExpandTypeToArgs(

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

        *this, CAExp, Addr, [&](Address EltAddr) {

          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,

    for (auto FD : RExp->Fields) {

      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,

  } else if (isa<ComplexExpansion>(Exp.get())) {

    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;

    assert(isa<NoExpansion>(Exp.get()));

    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
                                       const Twine &Name = "tmp") {

                                      llvm::StructType *SrcSTy,

  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  uint64_t FirstEltSize =

  if (FirstEltSize < DstSize &&

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
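// CoerceIntOrPtrToIntOrPtr converts a value between integer and pointer
// representations of possibly different widths, shifting on big-endian
// targets so that truncation or extension preserves the meaningful bits.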
  if (Val->getType() == Ty)

  if (isa<llvm::PointerType>(Val->getType())) {

    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))

  if (Val->getType() != DestIntTy) {

    if (DL.isBigEndian()) {

      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");

        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");

      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {

                                             DstSize.getFixedValue(), CGF);

  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {

  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedValue() >= DstSize.getFixedValue()) {

  if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {

      if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
          ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
          FixedSrcTy->getElementType()->isIntegerTy(8)) {
        ScalableDstTy = llvm::ScalableVectorType::get(
            FixedSrcTy->getElementType(),
            ScalableDstTy->getElementCount().getKnownMinValue() / 8);

      if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {

        auto *UndefVec = llvm::UndefValue::get(ScalableDstTy);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);

            ScalableDstTy, UndefVec, Load, Zero, "cast.scalable");
        if (ScalableDstTy != Ty)

      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
                                       llvm::TypeSize DstSize,
                                       bool DstIsVolatile) {

  llvm::Type *SrcTy = Src->getType();

  if (llvm::StructType *DstSTy =

    assert(!SrcSize.isScalable());

                                     SrcSize.getFixedValue(), *this);

  if (SrcSize.isScalable() || SrcSize <= DstSize) {
    if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() &&

    } else if (llvm::StructType *STy =
                   dyn_cast<llvm::StructType>(Src->getType())) {

      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {

        llvm::Value *Elt = Builder.CreateExtractValue(Src, i);

  } else if (SrcTy->isIntegerTy()) {

    llvm::Type *DstIntTy = Builder.getIntNTy(DstSize.getFixedValue() * 8);
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;

  unsigned TotalIRArgs;

    unsigned PaddingArgIndex;

    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),

                         bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;

  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;

  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);

                 bool OnlyRequiredArgs);
void ClangToLLVMArgMapping::construct(const ASTContext &Context,

                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;

    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;

    auto &IRArgs = ArgInfo[ArgNo];

      IRArgs.PaddingArgIndex = IRArgNo++;

      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());

        IRArgs.NumberOfArgs = STy->getNumElements();

        IRArgs.NumberOfArgs = 1;

      IRArgs.NumberOfArgs = 1;

      IRArgs.NumberOfArgs = 0;

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;

    if (IRArgNo == 1 && SwapThisWithSRet)

  assert(ArgNo == ArgInfo.size());

    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());

    switch (BT->getKind()) {

    case BuiltinType::Float:

    case BuiltinType::Double:

    case BuiltinType::LongDouble:

    if (BT->getKind() == BuiltinType::LongDouble)
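// GetFunctionType translates a CGFunctionInfo into the final
// llvm::FunctionType, materializing sret/inalloca slots, padding, coerced and
// expanded arguments.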
  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;

  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;

    llvm_unreachable("Invalid ABI kind for return argument");

    resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);

  if (IRFunctionArgs.hasSRetArg()) {

    ArgTypes[IRFunctionArgs.getSRetArgNo()] =

  if (IRFunctionArgs.hasInallocaArg())
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] =

  for (; it != ie; ++it, ++ArgNo) {

    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

      assert(NumIRArgs == 0);

      assert(NumIRArgs == 1);

      ArgTypes[FirstIRArg] = llvm::PointerType::get(

      assert(NumIRArgs == 1);
      ArgTypes[FirstIRArg] = llvm::PointerType::get(

      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);

        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);

        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;

      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;

        *ArgTypesIter++ = EltTy;

      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);

      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;

      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
                                              llvm::AttrBuilder &FuncAttrs,

    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    FuncAttrs.addAttribute("aarch64_pstate_sm_enabled");

    FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");

    FuncAttrs.addAttribute("aarch64_preserves_za");

    FuncAttrs.addAttribute("aarch64_in_za");

    FuncAttrs.addAttribute("aarch64_out_za");

    FuncAttrs.addAttribute("aarch64_inout_za");

    FuncAttrs.addAttribute("aarch64_preserves_zt0");

    FuncAttrs.addAttribute("aarch64_in_zt0");

    FuncAttrs.addAttribute("aarch64_out_zt0");

    FuncAttrs.addAttribute("aarch64_inout_zt0");
                                      const Decl *Callee) {

  for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
    AA->getAssumption().split(Attrs, ",");

  FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
                         llvm::join(Attrs.begin(), Attrs.end(), ","));
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return ClassDecl->hasTrivialDestructor();

                            const Decl *TargetDecl) {

  if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))

  if (!Module.getLangOpts().CPlusPlus)

  if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
    if (FDecl->isExternC())

  } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {

    if (VDecl->isExternC())

  return Module.getCodeGenOpts().StrictReturn ||
         !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
         Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
                                 llvm::DenormalMode FP32DenormalMode,
                                 llvm::AttrBuilder &FuncAttrs) {
  if (FPDenormalMode != llvm::DenormalMode::getDefault())
    FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());

  if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
    FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
                                        llvm::AttrBuilder &FuncAttrs) {

    StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
    llvm::AttrBuilder &FuncAttrs) {

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.IndirectTlsSegRefs)
    FuncAttrs.addAttribute("indirect-tls-seg-refs");
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {

      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);

      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);

    switch (CodeGenOpts.getFramePointer()) {

      FuncAttrs.addAttribute("frame-pointer",
                                 CodeGenOpts.getFramePointer()));

    if (CodeGenOpts.LessPreciseFPMAD)
      FuncAttrs.addAttribute("less-precise-fpmad", "true");

    if (CodeGenOpts.NullPointerIsValid)
      FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);

      FuncAttrs.addAttribute("no-trapping-math", "true");

    if (LangOpts.NoHonorInfs)
      FuncAttrs.addAttribute("no-infs-fp-math", "true");
    if (LangOpts.NoHonorNaNs)
      FuncAttrs.addAttribute("no-nans-fp-math", "true");
    if (LangOpts.ApproxFunc)
      FuncAttrs.addAttribute("approx-func-fp-math", "true");
    if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
        LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
        (LangOpts.getDefaultFPContractMode() ==
         LangOpts.getDefaultFPContractMode() ==
      FuncAttrs.addAttribute("unsafe-fp-math", "true");
    if (CodeGenOpts.SoftFloat)
      FuncAttrs.addAttribute("use-soft-float", "true");
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    if (LangOpts.NoSignedZero)
      FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");

    const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates",
                             llvm::join(Recips, ","));

      FuncAttrs.addAttribute("prefer-vector-width",

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");
    if (CodeGenOpts.EnableSegmentedStacks)
      FuncAttrs.addAttribute("split-stack");

    if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);

    switch (CodeGenOpts.getZeroCallUsedRegs()) {
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
      FuncAttrs.removeAttribute("zero-call-used-regs");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
      FuncAttrs.addAttribute("zero-call-used-regs", "used");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
      FuncAttrs.addAttribute("zero-call-used-regs", "all");

    FuncAttrs.addAttribute(llvm::Attribute::Convergent);

  if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
      LangOpts.SYCLIsDevice) {
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

  if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
    FuncAttrs.addAttribute("save-reg-params");

    StringRef Var, Value;

    FuncAttrs.addAttribute(Var, Value);
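// Merge the "target-features" string already present on the IR function with
// the features requested by the current target options, keeping the first
// setting seen for each feature name.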
                                 const llvm::Function &F,

  auto FFeatures = F.getFnAttribute("target-features");

  llvm::StringSet<> MergedNames;

  MergedFeatures.reserve(TargetOpts.Features.size());

  auto AddUnmergedFeatures = [&](auto &&FeatureRange) {
    for (StringRef Feature : FeatureRange) {
      if (Feature.empty())

      assert(Feature[0] == '+' || Feature[0] == '-');
      StringRef Name = Feature.drop_front(1);
      bool Merged = !MergedNames.insert(Name).second;

        MergedFeatures.push_back(Feature);

  if (FFeatures.isValid())
    AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ','));
  AddUnmergedFeatures(TargetOpts.Features);

  if (!MergedFeatures.empty()) {
    llvm::sort(MergedFeatures);
    FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));
                                bool WillInternalize) {

  llvm::AttrBuilder FuncAttrs(F.getContext());

  if (!TargetOpts.CPU.empty())
    FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
  if (!TargetOpts.TuneCPU.empty())
    FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);

                                        CodeGenOpts, LangOpts,

  if (!WillInternalize && F.isInterposable()) {

    F.addFnAttrs(FuncAttrs);

  llvm::AttributeMask AttrsToRemove;

  llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
  llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
  llvm::DenormalMode Merged =

  if (DenormModeToMergeF32.isValid()) {

  if (Merged == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math");
  } else if (Merged != DenormModeToMerge) {

    FuncAttrs.addAttribute("denormal-fp-math",

  if (MergedF32 == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math-f32");
  } else if (MergedF32 != DenormModeToMergeF32) {

    FuncAttrs.addAttribute("denormal-fp-math-f32",

  F.removeFnAttrs(AttrsToRemove);

  F.addFnAttrs(FuncAttrs);
void CodeGenModule::getTrivialDefaultFunctionAttributes(
    StringRef Name, bool HasOptnone, bool AttrOnCallSite,
    llvm::AttrBuilder &FuncAttrs) {
  ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),

void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
                                                 bool AttrOnCallSite,
                                                 llvm::AttrBuilder &FuncAttrs) {
  getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,

  if (!AttrOnCallSite)

                                         llvm::AttrBuilder &attrs) {
  getDefaultFunctionAttributes("", false,

  GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
                                   const NoBuiltinAttr *NBA = nullptr) {
  auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {

    AttributeName += "no-builtin-";
    AttributeName += BuiltinName;
    FuncAttrs.addAttribute(AttributeName);

  if (LangOpts.NoBuiltin) {

    FuncAttrs.addAttribute("no-builtins");

  if (llvm::is_contained(NBA->builtinNames(), "*")) {
    FuncAttrs.addAttribute("no-builtins");

  llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
                             const llvm::DataLayout &DL, const ABIArgInfo &AI,
                             bool CheckCoerce = true) {
  llvm::Type *Ty = Types.ConvertTypeForMem(QTy);

  if (!DL.typeSizeEqualsStoreSize(Ty))

    if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
                                  DL.getTypeSizeInBits(Ty)))

  if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))

  if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))

                                unsigned NumRequiredArgs, unsigned ArgNo) {
  const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);

  if (ArgNo >= NumRequiredArgs)

  if (ArgNo < FD->getNumParams()) {
    const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
    if (Param && Param->hasAttr<MaybeUndefAttr>())

  if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))

  if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {

           llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
             return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);

  llvm::FPClassTest Mask = llvm::fcNone;
  if (LangOpts.NoHonorInfs)
    Mask |= llvm::fcInf;
  if (LangOpts.NoHonorNaNs)
    Mask |= llvm::fcNan;
                                                 llvm::AttributeList &Attrs) {
  if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
    Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory);
    llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
    llvm::AttributeList &AttrList,

    bool AttrOnCallSite, bool IsThunk) {

    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

    FuncAttrs.addAttribute("cmse_nonsecure_call");

  bool HasOptnone = false;

  const NoBuiltinAttr *NBA = nullptr;

  auto AddPotentialArgAccess = [&]() {
    llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);

      FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
                              llvm::MemoryEffects::argMemOnly());

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<ColdAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Cold);
    if (TargetDecl->hasAttr<HotAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Hot);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {

      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {

        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();

            (Kind == OO_New || Kind == OO_Array_New))
          RetAttrs.addAttribute(llvm::Attribute::NoAlias);

      const bool IsVirtualCall = MD && MD->isVirtual();

      if (!(AttrOnCallSite && IsVirtualCall)) {
        if (Fn->isNoReturn())
          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
        NBA = Fn->getAttr<NoBuiltinAttr>();

    if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {

      if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
        FuncAttrs.addAttribute(llvm::Attribute::NoMerge);

    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
      FuncAttrs.addAttribute("no_caller_saved_registers");
    if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
    if (TargetDecl->hasAttr<LeafAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCallback);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      std::optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),

    if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {

        FuncAttrs.addAttribute("uniform-work-group-size", "true");

        FuncAttrs.addAttribute(
            "uniform-work-group-size",
            llvm::toStringRef(getLangOpts().OffloadUniformBlock));

    if (TargetDecl->hasAttr<CUDAGlobalAttr>() &&

      FuncAttrs.addAttribute("uniform-work-group-size", "true");

    if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>())
      FuncAttrs.addAttribute("aarch64_pstate_sm_body");
    getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);

    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<NoSplitStackAttr>())
      FuncAttrs.removeAttribute("split-stack");
    if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {

          TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
      FuncAttrs.removeAttribute("zero-call-used-regs");
      FuncAttrs.addAttribute(
          "zero-call-used-regs",
          ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));

    if (CodeGenOpts.NoPLT) {
      if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
        if (!Fn->isDefined() && !AttrOnCallSite) {
          FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);

    if (TargetDecl->hasAttr<NoConvergentAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::Convergent);

  if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      if (!FD->isExternallyVisible())
        FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",

  if (!AttrOnCallSite) {
    if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
      FuncAttrs.addAttribute("cmse_nonsecure_entry");

    auto shouldDisableTailCalls = [&] {

      if (CodeGenOpts.DisableTailCalls)

        if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
            TargetDecl->hasAttr<AnyX86InterruptAttr>())

        if (CodeGenOpts.NoEscapingBlockTailCalls) {
          if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
            if (!BD->doesNotEscape())

    if (shouldDisableTailCalls())
      FuncAttrs.addAttribute("disable-tail-calls", "true");

    GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  if (CodeGenOpts.EnableNoundefAttrs &&

    RetAttrs.addAttribute(llvm::Attribute::NoUndef);

    RetAttrs.addAttribute(llvm::Attribute::SExt);

    RetAttrs.addAttribute(llvm::Attribute::ZExt);

      RetAttrs.addAttribute(llvm::Attribute::InReg);

    AddPotentialArgAccess();

    llvm_unreachable("Invalid ABI kind for return argument");

      RetAttrs.addDereferenceableAttr(

      if (getTypes().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        RetAttrs.addAttribute(llvm::Attribute::NonNull);

        llvm::Align Alignment =

        RetAttrs.addAlignmentAttr(Alignment);

  bool hasUsedSRet = false;

  if (IRFunctionArgs.hasSRetArg()) {

    SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
    SRETAttrs.addAttribute(llvm::Attribute::Writable);
    SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);

      SRETAttrs.addAttribute(llvm::Attribute::InReg);

    ArgAttrs[IRFunctionArgs.getSRetArgNo()] =

  if (IRFunctionArgs.hasInallocaArg()) {

    ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =

    auto IRArgs = IRFunctionArgs.getIRArgs(0);

    assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");

    if (!CodeGenOpts.NullPointerIsValid &&

      Attrs.addAttribute(llvm::Attribute::NonNull);

      Attrs.addDereferenceableOrNullAttr(

    llvm::Align Alignment =

    Attrs.addAlignmentAttr(Alignment);

    ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);

       I != E; ++I, ++ArgNo) {

    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {

        ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
            llvm::AttributeSet::get(

                llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));

    if (CodeGenOpts.EnableNoundefAttrs &&

      Attrs.addAttribute(llvm::Attribute::NoUndef);

        Attrs.addAttribute(llvm::Attribute::SExt);

        Attrs.addAttribute(llvm::Attribute::ZExt);

        Attrs.addAttribute(llvm::Attribute::Nest);

        Attrs.addAttribute(llvm::Attribute::InReg);
      Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));

        Attrs.addAttribute(llvm::Attribute::InReg);

        Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));

      if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
          Decl->getArgPassingRestrictions() ==

        Attrs.addAttribute(llvm::Attribute::NoAlias);

        AddPotentialArgAccess();

      Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));

      AddPotentialArgAccess();

        Attrs.addDereferenceableAttr(

        if (getTypes().getTargetAddressSpace(PTy) == 0 &&
            !CodeGenOpts.NullPointerIsValid)
          Attrs.addAttribute(llvm::Attribute::NonNull);

          llvm::Align Alignment =

          Attrs.addAlignmentAttr(Alignment);

    if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&

      llvm::Align Alignment =

      Attrs.addAlignmentAttr(Alignment);

      Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));

        Attrs.addAttribute(llvm::Attribute::NoAlias);

      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {

        Attrs.addDereferenceableAttr(info.Width.getQuantity());
        Attrs.addAlignmentAttr(info.Align.getAsAlign());

      Attrs.addAttribute(llvm::Attribute::SwiftError);

      Attrs.addAttribute(llvm::Attribute::SwiftSelf);

      Attrs.addAttribute(llvm::Attribute::SwiftAsync);

      Attrs.addAttribute(llvm::Attribute::NoCapture);

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(

  AttrList = llvm::AttributeList::get(
                                          llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");

                                QualType ArgType, unsigned ArgNo) {

    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())

  if (NNAttr->isNonNull(ArgNo))
  if (FD->hasImplicitReturnZero()) {
    QualType RetTy = FD->getReturnType().getUnqualifiedType();

    llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);

  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());

  if (IRFunctionArgs.hasInallocaArg())
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),

  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);

  ArgVals.reserve(Args.size());

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {

        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

      assert(NumIRArgs == 0);

      assert(NumIRArgs == 1);

        ParamAddr = AlignedTemp;

      auto AI = Fn->getArg(FirstIRArg);

      assert(NumIRArgs == 1);

      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {

                               PVD->getFunctionScopeIndex()) &&

          AI->addAttr(llvm::Attribute::NonNull);

        QualType OTy = PVD->getOriginalType();
        if (const auto *ArrTy =

            QualType ETy = ArrTy->getElementType();
            llvm::Align Alignment =

            AI->addAttrs(
                llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
            uint64_t ArrSize = ArrTy->getZExtSize();

              Attrs.addDereferenceableAttr(
                  getContext().getTypeSizeInChars(ETy).getQuantity() *

              AI->addAttrs(Attrs);
            } else if (getContext().getTargetInfo().getNullPointerValue(

              AI->addAttr(llvm::Attribute::NonNull);

        } else if (const auto *ArrTy =

            QualType ETy = ArrTy->getElementType();
            llvm::Align Alignment =

            AI->addAttrs(
                llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
            if (!getTypes().getTargetAddressSpace(ETy) &&

              AI->addAttr(llvm::Attribute::NonNull);

        const auto *AVAttr = PVD->getAttr<AlignValueAttr>();

          AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
        if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {

          llvm::ConstantInt *AlignmentCI =

              AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
          if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
            AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
            AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
                llvm::Align(AlignmentInt)));

          AI->addAttr(llvm::Attribute::NoAlias);
      assert(NumIRArgs == 1);

      llvm::Value *V = AI;

            V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));

      if (V->getType() != LTy)

      if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
        llvm::Value *Coerced = Fn->getArg(FirstIRArg);
        if (auto *VecTyFrom =
                dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {

          if (VecTyFrom->getElementType()->isIntegerTy(1) &&
              VecTyFrom->getElementCount().isKnownMultipleOf(8) &&
              VecTyTo->getElementType() == Builder.getInt8Ty()) {
            VecTyFrom = llvm::ScalableVectorType::get(
                VecTyTo->getElementType(),
                VecTyFrom->getElementCount().getKnownMinValue() / 8);
            Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);

          if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {

            assert(NumIRArgs == 1);
            Coerced->setName(Arg->getName() + ".coerce");

                VecTyTo, Coerced, Zero, "cast.fixed")));

      llvm::StructType *STy =

          STy->getNumElements() > 1) {
        [[maybe_unused]] llvm::TypeSize StructSize =
        [[maybe_unused]] llvm::TypeSize PtrElementSize =
        if (STy->containsHomogeneousScalableVectorTypes()) {
          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with"
                 "homogeneous scalable vector type");

          STy->getNumElements() > 1) {

        llvm::TypeSize PtrElementSize =
        if (StructSize.isScalable()) {
          assert(STy->containsHomogeneousScalableVectorTypes() &&
                 "ABI only supports structure with homogeneous scalable vector "
          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with"
                 "homogeneous scalable vector type");
          assert(STy->getNumElements() == NumIRArgs);

          llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto *AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));

                Builder.CreateInsertValue(LoadedStructValue, AI, i);

          uint64_t SrcSize = StructSize.getFixedValue();
          uint64_t DstSize = PtrElementSize.getFixedValue();

          if (SrcSize <= DstSize) {

            assert(STy->getNumElements() == NumIRArgs);
            for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
              auto AI = Fn->getArg(FirstIRArg + i);
              AI->setName(Arg->getName() + ".coerce" + Twine(i));

          if (SrcSize > DstSize) {

        assert(NumIRArgs == 1);
        auto AI = Fn->getArg(FirstIRArg);
        AI->setName(Arg->getName() + ".coerce");

              llvm::TypeSize::getFixed(
                  getContext().getTypeSizeInChars(Ty).getQuantity() -

      unsigned argIndex = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);

        auto elt = Fn->getArg(argIndex++);

      assert(argIndex == FirstIRArg + NumIRArgs);

      auto FnArgIter = Fn->arg_begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = Fn->getArg(FirstIRArg + i);
        AI->setName(Arg->getName() + "." + Twine(i));

      assert(NumIRArgs == 0);
  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)

    for (unsigned I = 0, E = Args.size(); I != E; ++I)

  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
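// Try to fuse an ARC autorelease of the result with a retain emitted just
// before it in the same block, deleting the now-redundant instructions so the
// value can be returned directly.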
                                                    llvm::Value *result) {

  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {

    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    if (generator->getNextNode() != bitcast)

    InstsToKill.push_back(bitcast);

  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

    doRetainAutorelease = true;
  } else if (call->getCalledOperand() ==

    doRetainAutorelease = false;

    llvm::Instruction *prev = call->getPrevNode();

    if (isa<llvm::BitCastInst>(prev)) {
      prev = prev->getPrevNode();

    assert(isa<llvm::CallInst>(prev));
    assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==

    InstsToKill.push_back(prev);

  result = call->getArgOperand(0);
  InstsToKill.push_back(call);

  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    InstsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);

  for (auto *I : InstsToKill)
    I->eraseFromParent();

  if (doRetainAutorelease)

  return CGF.Builder.CreateBitCast(result, resultType);
                                              llvm::Value *result) {

      dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;

  llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
  if (!retainCall || retainCall->getCalledOperand() !=

  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
      dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||

  llvm::Type *resultType = result->getType();

  assert(retainCall->use_empty());
  retainCall->eraseFromParent();

  return CGF.Builder.CreateBitCast(load, resultType);
                                                      llvm::Value *result) {

  auto GetStoreIfValid = [&CGF,
                          ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != ReturnValuePtr ||

    assert(!SI->isAtomic() &&

  if (!ReturnValuePtr->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;

    for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
      if (isa<llvm::BitCastInst>(&I))

      if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
        if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)

      return GetStoreIfValid(&I);

  llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
  if (!store) return nullptr;

  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();

  while (IP != StoreBB) {
    if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
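// For CMSE nonsecure entry points, the helpers below build a byte-wise mask
// of the bits actually used by a returned record (including bit-fields), so
// that padding can be cleared before returning to non-secure state.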
                        int BitWidth, int CharWidth) {
  assert(CharWidth <= 64);
  assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);

  if (BitOffset >= CharWidth) {
    Pos += BitOffset / CharWidth;
    BitOffset = BitOffset % CharWidth;

  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;

  while (BitWidth >= CharWidth) {

    BitWidth -= CharWidth;

    Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;

                        int StorageSize, int BitOffset, int BitWidth,
                        int CharWidth, bool BigEndian) {

  setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);

    std::reverse(TmpBits.begin(), TmpBits.end());

  for (uint64_t V : TmpBits)
    Bits[StorageOffset++] |= V;
                BFI.Size, CharWidth,

    auto Src = TmpBits.begin();
    auto Dst = Bits.begin() + Offset + I * Size;
    for (int J = 0; J < Size; ++J)

  std::fill_n(Bits.begin() + Offset, Size,

                                   int Pos, int Size, int CharWidth,

    for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;

      Mask = (Mask << CharWidth) | *P;

    auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;

      Mask = (Mask << CharWidth) | *--P;
                                                   llvm::IntegerType *ITy,

  assert(Src->getType() == ITy);
  assert(ITy->getScalarSizeInBits() <= 64);

  int Size = DataLayout.getTypeStoreSize(ITy);

  return Builder.CreateAnd(Src, Mask, "cmse.clear");

                                                   llvm::ArrayType *ATy,

  int Size = DataLayout.getTypeStoreSize(ATy);

      ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;

  llvm::Value *R = llvm::PoisonValue::get(ATy);
  for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {

                                      DataLayout.isBigEndian());
    MaskIndex += CharsPerElt;
    llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
    llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
    R = Builder.CreateInsertValue(R, T1, I);
  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;

    llvm::Function::arg_iterator EI = CurFn->arg_end();

    llvm::Value *ArgStruct = &*EI;

        cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();

    auto AI = CurFn->arg_begin();

      if (llvm::StoreInst *SI =

        RetDbgLoc = SI->getDebugLoc();

        RV = SI->getValueOperand();
        SI->eraseFromParent();

        if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
          RT = FD->getReturnType();
        else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
          RT = MD->getReturnType();

          llvm_unreachable("Unexpected function/method type");

    for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {

      results.push_back(elt);

    if (results.size() == 1) {

      RV = llvm::PoisonValue::get(returnType);
      for (unsigned i = 0, e = results.size(); i != e; ++i) {
        RV = Builder.CreateInsertValue(RV, results[i], i);

    llvm_unreachable("Invalid ABI kind for return argument");

  llvm::Instruction *Ret;

      auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());

  Ret->setDebugLoc(std::move(RetDbgLoc));
  ReturnsNonNullAttr *RetNNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))

  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())

    assert(!requiresReturnValueNullabilityCheck() &&
           "Cannot check nullability and the nonnull attribute");
    AttrLoc = RetNNAttr->getLocation();
    CheckKind = SanitizerKind::ReturnsNonnullAttribute;
    Handler = SanitizerHandler::NonnullReturn;

    if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
      if (auto *TSI = DD->getTypeSourceInfo())

        AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityReturn;
    Handler = SanitizerHandler::NullabilityReturn;

  SanitizerScope SanScope(this);

  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  if (requiresReturnValueNullabilityCheck())

        Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);

  llvm::Value *Cond = Builder.CreateIsNotNull(RV);

  llvm::Value *DynamicData[] = {SLocPtr};
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);

  llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
  llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
  if (type->isReferenceType()) {

      param->hasAttr<NSConsumedAttr>() &&
      type->isObjCRetainableType()) {

        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));

      CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));

         "cleanup for callee-destructed param not recorded");

  llvm::Instruction *isActive = Builder.CreateUnreachable();

  return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);

         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  if (!provablyNonNull) {

    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);

                                "icr.writeback-cast");

  if (writeback.ToUse) {

  if (!provablyNonNull)

  for (const auto &I : llvm::reverse(Cleanups)) {

    I.IsActiveIP->eraseFromParent();

    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();

  llvm::PointerType *destType =

  llvm::Type *destElemType =

  CodeGenFunction::ConditionalEvaluation condEval(CGF);

      llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  llvm::Value *finalArgument;

  if (provablyNonNull) {

    finalArgument = CGF.Builder.CreateSelect(
        isNull, llvm::ConstantPointerNull::get(destType),

    originBB = CGF.Builder.GetInsertBlock();

    CGF.Builder.CreateCondBr(isNull, contBB, copyBB);

    condEval.begin(CGF);

  llvm::Value *valueToUse = nullptr;

    src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");

  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();

    llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,

    phiToUse->addIncoming(valueToUse, copyBB);
    phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),

    valueToUse = phiToUse;
  StackBase = CGF.Builder.CreateStackSave("inalloca.save");

    CGF.Builder.CreateStackRestore(StackBase);

  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||

  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;

  const NonNullAttr *NNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::NonnullAttribute))

  bool CanCheckNullability = false;
  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
      !PVD->getType()->isRecordType()) {
    auto Nullability = PVD->getType()->getNullability();
    CanCheckNullability = Nullability &&

                          PVD->getTypeSourceInfo();

  if (!NNAttr && !CanCheckNullability)

    AttrLoc = NNAttr->getLocation();
    CheckKind = SanitizerKind::NonnullAttribute;
    Handler = SanitizerHandler::NonnullArg;

    AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityArg;
    Handler = SanitizerHandler::NullabilityArg;

  SanitizerScope SanScope(this);

  llvm::Constant *StaticData[] = {

      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),

  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
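// EmitCallArgs evaluates the call arguments (left-to-right or right-to-left
// depending on the ABI), inserting implicit pass_object_size values and
// nonnull/nullability argument checks as needed.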
4413 AbstractCallee AC,
unsigned ParmNum) {
4414 if (!AC.getDecl() || !(
SanOpts.
has(SanitizerKind::NonnullAttribute) ||
4434 return llvm::any_of(ArgTypes, [&](QualType Ty) {
4445 return classDecl->getTypeParamListAsWritten();
4449 return catDecl->getTypeParamList();
4459 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4460 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
4463 assert((ParamsToSkip == 0 || Prototype.P) &&
4464 "Can't skip parameters if type info is not provided");
4474 bool IsVariadic = false;
4481 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4482 MD->param_type_end());
4486 ExplicitCC = FPT->getExtInfo().getCC();
4487 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4488 FPT->param_type_end());
4496 assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4498 (isGenericMethod || Ty->isVariablyModifiedType() ||
4499 Ty.getNonReferenceType()->isObjCRetainableType() ||
4501 .getCanonicalType(Ty.getNonReferenceType())
4503 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4504 "type mismatch in call argument!");
4510 assert((Arg == ArgRange.end() || IsVariadic) &&
4511 "Extra arguments in non-variadic function!");
4516 for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4517 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4518 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4530 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4532 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4534 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4541 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4542 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4543 EmittedArg.getScalarVal(),
4549 std::swap(Args.back(), *(&Args.back() - 1));
4554 assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4555 "inalloca only supported on x86");
4560 size_t CallArgsStart = Args.size();
4561 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4562 unsigned Idx = LeftToRight ? I : E - I - 1;
4564 unsigned InitialArgSize = Args.size();
4567 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4568 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4570 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4572 "Argument and parameter types don't match");
4576 assert(InitialArgSize + 1 == Args.size() &&
4577 "The code below depends on only adding one arg per EmitCallArg");
4578 (void)InitialArgSize;
4581 if (!Args.back().hasLValue()) {
4582 RValue RVArg = Args.back().getKnownRValue();
4584 ParamsToSkip + Idx);
4588 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4595 std::reverse(Args.begin() + CallArgsStart, Args.end());
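The MaybeEmitImplicitObjectSize lambda above handles parameters carrying PassObjectSizeAttr. An illustrative declaration (names invented) of what triggers it:

  // pass_object_size makes the caller evaluate __builtin_object_size on the
  // argument and pass the result as an extra, implicit call argument.
  void fill(char value, char *buf __attribute__((pass_object_size(0))));

  void caller() {
    char local[16];
    fill('x', local); // also passes __builtin_object_size(local, 0), i.e. 16
  }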
4603 : Addr(Addr), Ty(Ty) {}
4621 struct DisableDebugLocationUpdates {
4623 bool disabledDebugInfo;
4625 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
4628 ~DisableDebugLocationUpdates() {
4629 if (disabledDebugInfo)
4665 DisableDebugLocationUpdates Dis(*this, E);
4667 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4673 "reference binding to unmaterialized r-value!");
4685 if (type->isRecordType() &&
4692 bool DestroyedInCallee = true, NeedsCleanup = true;
4693 if (const auto *RD = type->getAsCXXRecordDecl())
4694 DestroyedInCallee = RD->hasNonTrivialDestructor();
4696 NeedsCleanup = type.isDestructedType();
4698 if (DestroyedInCallee)
4705 if (DestroyedInCallee && NeedsCleanup) {
4712 llvm::Instruction *IsActive =
4719 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4720 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue &&
4721 !type->isArrayParameterType()) {
4731 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4735 if (!getTarget().getTriple().isOSWindows())
4752 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4755 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4762 const llvm::Twine &name) {
4770 const llvm::Twine &name) {
4772 for (auto arg : args)
4773 values.push_back(arg.emitRawPointer(*this));
4780 const llvm::Twine &name) {
4782 call->setDoesNotThrow();
4789 const llvm::Twine &name) {
4804 if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
4805 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
4806 auto IID = CalleeFn->getIntrinsicID();
4807 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
4820 const llvm::Twine &name) {
4821 llvm::CallInst *call = Builder.CreateCall(
4837 llvm::InvokeInst *invoke =
4843 invoke->setDoesNotReturn();
4846 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4847 call->setDoesNotReturn();
4856 const Twine &name) {
4864 const Twine &name) {
4874 const Twine &Name) {
4879 llvm::CallBase *Inst;
4881 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4884 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4892 AddObjCARCExceptionMetadata(Inst);
4897 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4899 DeferredReplacements.push_back(
4900 std::make_pair(llvm::WeakTrackingVH(Old), New));
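The runtime-call helpers above (EmitRuntimeCall, EmitNounwindRuntimeCall, EmitRuntimeCallOrInvoke, and the noreturn variant) are what other parts of CodeGen use to call support routines. A hedged sketch of typical usage, assuming the usual lib/CodeGen headers are available and using an invented runtime entry point named __my_runtime_hook:

  // Minimal sketch, not clang's own code: declare the callee through
  // CodeGenModule and call it with the nounwind helper, so no invoke is needed.
  void emitRuntimeHook(clang::CodeGen::CodeGenFunction &CGF,
                       clang::CodeGen::CodeGenModule &CGM,
                       llvm::Value *Payload) {
    llvm::FunctionType *FTy = llvm::FunctionType::get(
        CGM.VoidTy, {CGM.Int8PtrTy}, /*isVarArg=*/false);
    llvm::FunctionCallee Hook =
        CGM.CreateRuntimeFunction(FTy, "__my_runtime_hook");
    CGF.EmitNounwindRuntimeCall(Hook, Payload);
  }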
4907 [[nodiscard]] llvm::AttributeList
4908maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4909 const llvm::AttributeList &Attrs,
4910 llvm::Align NewAlign) {
4911 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4912 if (CurAlign >= NewAlign)
4914 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4915 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
4916 .addRetAttribute(Ctx, AlignAttr);
4919 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4924 const AlignedAttrTy *AA = nullptr;
4926 llvm::Value *Alignment = nullptr;
4927 llvm::ConstantInt *OffsetCI = nullptr;
4933 AA = FuncDecl->getAttr<AlignedAttrTy>();
4938 [[nodiscard]] llvm::AttributeList
4939 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4940 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4942 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4947 if (!AlignmentCI->getValue().isPowerOf2())
4949 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4952 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4964 AA->getLocation(), Alignment, OffsetCI);
4970 class AssumeAlignedAttrEmitter final
4971 : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4974 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4978 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4979 if (Expr *Offset = AA->getOffset()) {
4981 if (OffsetCI->isNullValue())
4988 class AllocAlignAttrEmitter final
4989 : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4993 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4997 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
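An illustrative pair of declarations (names invented) showing the attributes the two emitter classes above look for on the callee; when the alignment is a constant power of two, it can become a call-site return attribute, otherwise it is emitted as an assumption:

  __attribute__((assume_aligned(64))) void *getBuffer();          // AssumeAlignedAttr
  __attribute__((alloc_align(1))) void *allocAligned(unsigned a); // AllocAlignAttr

  void use() {
    void *p = getBuffer();      // call site may get an align(64) return attribute
    void *q = allocAligned(32); // alignment taken from the first argument
  }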
5006 if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
5007 return VT->getPrimitiveSizeInBits().getKnownMinValue();
5008 if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
5011 unsigned MaxVectorWidth = 0;
5012 if (auto *ST = dyn_cast<llvm::StructType>(Ty))
5013 for (auto *I : ST->elements())
5015 return MaxVectorWidth;
5022 llvm::CallBase **callOrInvoke, bool IsMustTail,
5024 bool IsVirtualFunctionPointerThunk) {
5036 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
5037 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5044 if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
5045 (TargetDecl->hasAttr<TargetAttr>() ||
5054 dyn_cast_or_null<FunctionDecl>(TargetDecl), CallArgs, RetTy);
5061 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
5064 llvm::AllocaInst *AI;
5066 IP = IP->getNextNode();
5067 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), "argmem",
5073 AI->setAlignment(Align.getAsAlign());
5074 AI->setUsedWithInAlloca(true);
5075 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5076 ArgMemory = RawAddress(AI, ArgStruct, Align);
5079 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
5086 llvm::Value *UnusedReturnSizePtr = nullptr;
5088 if (IsVirtualFunctionPointerThunk && RetAI.isIndirect()) {
5090 IRFunctionArgs.getSRetArgNo(),
5097 llvm::TypeSize size =
5102 if (IRFunctionArgs.hasSRetArg()) {
5103 IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
5121 assert(CallInfo.arg_size() == CallArgs.size() &&
5122 "Mismatch between function signature & arguments.");
5125 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5126 I != E; ++I, ++info_it, ++ArgNo) {
5130 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5131 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5134 unsigned FirstIRArg, NumIRArgs;
5135 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5137 bool ArgHasMaybeUndefAttr =
5142 assert(NumIRArgs == 0);
5143 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
5144 if (I->isAggregate()) {
5146 ? I->getKnownLValue().getAddress()
5147 : I->getKnownRValue().getAggregateAddress();
5148 llvm::Instruction *Placeholder =
5153 CGBuilderTy::InsertPoint IP = Builder.saveIP();
5154 Builder.SetInsertPoint(Placeholder);
5167 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
5172 I->Ty, getContext().getTypeAlignInChars(I->Ty),
5173 "indirect-arg-temp");
5174 I->copyInto(*this, Addr);
5183 I->copyInto(*this, Addr);
5190 assert(NumIRArgs == 1);
5191 if (I->isAggregate()) {
5201 ? I->getKnownLValue().getAddress()
5202 : I->getKnownRValue().getAggregateAddress();
5206 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5207 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5208 TD->getAllocaAddrSpace()) &&
5209 "indirect argument must be in alloca address space");
5211 bool NeedCopy = false;
5217 } else if (I->hasLValue()) {
5218 auto LV = I->getKnownLValue();
5224 if (!isByValOrRef ||
5229 if ((isByValOrRef &&
5237 else if ((isByValOrRef &&
5238 Addr.getType()->getAddressSpace() != IRFuncTy->
5247 auto *T = llvm::PointerType::get(
5253 if (ArgHasMaybeUndefAttr)
5254 Val = Builder.CreateFreeze(Val);
5255 IRCallArgs[FirstIRArg] = Val;
5265 if (ArgHasMaybeUndefAttr)
5266 Val = Builder.CreateFreeze(Val);
5267 IRCallArgs[FirstIRArg] = Val;
5270 llvm::TypeSize ByvalTempElementSize =
5272 llvm::Value *LifetimeSize =
5277 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
5280 I->copyInto(*this, AI);
5285 assert(NumIRArgs == 0);
5293 assert(NumIRArgs == 1);
5295 if (!I->isAggregate())
5296 V = I->getKnownRValue().getScalarVal();
5299 I->hasLValue() ? I->getKnownLValue().getAddress()
5300 : I->getKnownRValue().getAggregateAddress());
5306 assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
5310 V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
5315 cast<llvm::AllocaInst>(V)->setSwiftError(true);
5323 V->getType()->isIntegerTy())
5328 if (FirstIRArg < IRFuncTy->getNumParams() &&
5329 V->getType() != IRFuncTy->getParamType(FirstIRArg))
5330 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
5332 if (ArgHasMaybeUndefAttr)
5334 IRCallArgs[FirstIRArg] = V;
5338 llvm::StructType *STy =
5342 [[maybe_unused]] llvm::TypeSize SrcTypeSize =
5344 [[maybe_unused]] llvm::TypeSize DstTypeSize =
5346 if (STy->containsHomogeneousScalableVectorTypes()) {
5347 assert(SrcTypeSize == DstTypeSize &&
5348 "Only allow non-fractional movement of structure with "
5349 "homogeneous scalable vector type");
5351 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5358 if (!I->isAggregate()) {
5360 I->copyInto(*this, Src);
5362 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5363 : I->getKnownRValue().getAggregateAddress();
5373 llvm::TypeSize SrcTypeSize =
5376 if (SrcTypeSize.isScalable()) {
5377 assert(STy->containsHomogeneousScalableVectorTypes() &&
5378 "ABI only supports structure with homogeneous scalable vector "
5380 assert(SrcTypeSize == DstTypeSize &&
5381 "Only allow non-fractional movement of structure with "
5382 "homogeneous scalable vector type");
5383 assert(NumIRArgs == STy->getNumElements());
5385 llvm::Value *StoredStructValue =
5387 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5388 llvm::Value *Extract = Builder.CreateExtractValue(
5389 StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
5390 IRCallArgs[FirstIRArg + i] = Extract;
5393 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5394 uint64_t DstSize = DstTypeSize.getFixedValue();
5400 if (SrcSize < DstSize) {
5409 assert(NumIRArgs == STy->getNumElements());
5410 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5413 if (ArgHasMaybeUndefAttr)
5414 LI = Builder.CreateFreeze(LI);
5415 IRCallArgs[FirstIRArg + i] = LI;
5420 assert(NumIRArgs == 1);
5428 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5429 if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
5433 if (ArgHasMaybeUndefAttr)
5435 IRCallArgs[FirstIRArg] = Load;
5445 llvm::Value *tempSize = nullptr;
5448 if (I->isAggregate()) {
5449 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5450 : I->getKnownRValue().getAggregateAddress();
5453 RValue RV = I->getKnownRValue();
5465 nullptr, &AllocaAddr);
5473 unsigned IRArgPos = FirstIRArg;
5474 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5475 llvm::Type *eltType = coercionType->getElementType(i);
5479 if (ArgHasMaybeUndefAttr)
5480 elt = Builder.CreateFreeze(elt);
5481 IRCallArgs[IRArgPos++] = elt;
5483 assert(IRArgPos == FirstIRArg + NumIRArgs);
5493 unsigned IRArgPos = FirstIRArg;
5494 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5495 assert(IRArgPos == FirstIRArg + NumIRArgs);
5501 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
5507 assert(IRFunctionArgs.hasInallocaArg());
5508 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
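For a sense of what the Direct/flattened case above lowers, an illustrative source-level example (names invented, exact lowering is target-dependent):

  // On x86-64, ABI classification usually coerces this small aggregate to two
  // i64 pieces (ABIArgInfo::Direct with a flattenable coercion struct), so the
  // loop above passes each element of the coerced struct as a separate IR
  // argument: roughly "i64 @sum(i64, i64)" after name mangling.
  struct Pair { long a; long b; };
  long sum(Pair p);
  long call() { return sum({1, 2}); }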
5519 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5520 llvm::Value *Ptr) -> llvm::Function * {
5521 if (!CalleeFT->isVarArg())
5525 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5526 if (CE->getOpcode() == llvm::Instruction::BitCast)
5527 Ptr = CE->getOperand(0);
5530 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5534 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5538 if (OrigFT->isVarArg() ||
5539 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5540 OrigFT->getReturnType() != CalleeFT->getReturnType())
5543 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5544 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5550 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5552 IRFuncTy = OrigFn->getFunctionType();
5567 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5568 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5570 if (IRFunctionArgs.hasInallocaArg() &&
5571 i == IRFunctionArgs.getInallocaArgNo())
5573 if (i < IRFuncTy->getNumParams())
5574 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5579 for (unsigned i = 0; i < IRCallArgs.size(); ++i)
5580 LargestVectorWidth = std::max(LargestVectorWidth,
5585 llvm::AttributeList Attrs;
5591 if (CallingConv == llvm::CallingConv::X86_VectorCall &&
5592 getTarget().getTriple().isWindowsArm64EC()) {
5593 CGM.Error(Loc, "__vectorcall calling convention is not currently "
5598 if (FD->hasAttr<StrictFPAttr>())
5600 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5605 if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath)
5611 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
5615 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5620 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5625 Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Convergent);
5634 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5636 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5641 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5648 CannotThrow = false;
5657 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5659 if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5660 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5668 if (UnusedReturnSizePtr)
5670 UnusedReturnSizePtr);
5672 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
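The NoMerge/NoInline/AlwaysInline call-site attributes applied above correspond to statement attributes available in recent Clang. An illustrative (assumed) example:

  void target();

  void caller() {
    [[clang::nomerge]] target();        // llvm::Attribute::NoMerge on this call site
    [[clang::noinline]] target();       // llvm::Attribute::NoInline on this call site
    [[clang::always_inline]] target();  // llvm::Attribute::AlwaysInline on this call site
  }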
5678 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5685 if (FD->hasAttr<StrictFPAttr>())
5687 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5689 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5690 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5692 AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5693 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5698 CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5701 CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5705 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
5706 CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
5715 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5716 if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5717 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5723 CI->setAttributes(Attrs);
5724 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5728 if (!CI->getType()->isVoidTy())
5729 CI->setName("call");
5735 LargestVectorWidth =
5741 if (!CI->getCalledFunction())
5748 AddObjCARCExceptionMetadata(CI);
5751 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5752 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5753 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5754 else if (IsMustTail) {
5761 else if (Call->isIndirectCall())
5763 else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
5764 if (!cast<FunctionDecl>(TargetDecl)->isDefined())
5769 {cast<FunctionDecl>(TargetDecl), Loc});
5773 if (llvm::GlobalValue::isWeakForLinker(Linkage) ||
5774 llvm::GlobalValue::isDiscardableIfUnused(Linkage))
5781 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
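The IsMustTail path above is what ends in TCK_MustTail; the surrounding checks diagnose cases (indirect callees, not-yet-defined or weak/discardable targets) where a mandatory tail call may not be realizable. An illustrative (assumed) source example:

  int callee(int x);
  int caller(int x) {
    [[clang::musttail]] return callee(x + 1); // lowered as a musttail call
  }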
5787 TargetDecl->hasAttr<MSAllocatorAttr>())
5791 if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
5792 llvm::ConstantInt *Line =
5794 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
5796 CI->setMetadata("srcloc", MDT);
5804 if (CI->doesNotReturn()) {
5805 if (UnusedReturnSizePtr)
5809 if (SanOpts.has(SanitizerKind::Unreachable)) {
5812 if (auto *F = CI->getCalledFunction())
5813 F->removeFnAttr(llvm::Attribute::NoReturn);
5814 CI->removeFnAttr(llvm::Attribute::NoReturn);
5819 SanitizerKind::KernelAddress)) {
5820 SanitizerScope SanScope(this);
5821 llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5823 auto *FnType = llvm::FunctionType::get(CGM.VoidTy, false);
5824 llvm::FunctionCallee Fn =
5831 Builder.ClearInsertionPoint();
5851 if (CI->getType()->isVoidTy())
5855 Builder.ClearInsertionPoint();
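An illustrative (assumed) example of a call the noreturn handling above applies to: the insertion point is dropped after such a call, and with -fsanitize=unreachable the NoReturn attribute is stripped so the sanitizer can still instrument what follows.

  [[noreturn]] void fatalError(const char *msg);

  int divide(int a, int b) {
    if (b == 0)
      fatalError("division by zero"); // no fallthrough: the block ends in unreachable
    return a / b;
  }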
5861 if (swiftErrorTemp.isValid()) {
5880 if (IsVirtualFunctionPointerThunk) {
5891 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5893 unsigned unpaddedIndex = 0;
5894 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5895 llvm::Type *eltType = coercionType->getElementType(i);
5899 llvm::Value *elt = CI;
5900 if (requiresExtract)
5901 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5903 assert(unpaddedIndex == 0);
5912 if (UnusedReturnSizePtr)
5929 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5930 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5938 llvm::Value *V = CI;
5939 if (V->getType() != RetIRTy)
5949 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
5950 llvm::Value *V = CI;
5951 if (auto *ScalableSrcTy =
5952 dyn_cast<llvm::ScalableVectorType>(V->getType())) {
5953 if (FixedDstTy->getElementType() ==
5954 ScalableSrcTy->getElementType()) {
5956 V = Builder.CreateExtractVector(FixedDstTy, V, Zero,
5970 DestIsVolatile = false;
5991 llvm_unreachable("Invalid ABI kind for return argument");
5994 llvm_unreachable("Unhandled ABIArgInfo::Kind");
5999 if (Ret.isScalar() && TargetDecl) {
6000 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6001 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6006 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
6007 LifetimeEnd.Emit(*this, {});
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA=nullptr)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from TargetOpts and F, and sets the result in FuncAttr.
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsWindows)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, llvm::DenormalMode FP32DenormalMode, llvm::AttrBuilder &FuncAttrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
enum clang::sema::@1653::IndirectLocalPathEntry::EntryKind Kind
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
llvm::MachO::Target Target
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static QualType getParamType(Sema &SemaRef, ArrayRef< ResultCandidate > Candidates, unsigned N)
Get the type of the Nth parameter from a given set of overload candidates.
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod, bool IsBuiltin=false) const
Retrieves the default calling convention for the current target.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const
Return number of constant array elements.
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
const FunctionProtoType * getFunctionType() const
getFunctionType - Return the underlying function type for this block.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const LLVM_READONLY
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
virtual RValue EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const
Emit the target dependent code to load a value of.
virtual RValue EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const =0
EmitVAArg - Emit the target dependent code to load a value of.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StringRef getName() const
Return the IR name of the pointer value.
llvm::PointerType * getType() const
Return the type of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
const BlockExpr * BlockExpression
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
SanitizerSet SanOpts
Sanitizers enabled for this function.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
const CodeGen::CGBlockInfo * BlockInfo
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void callCStructDestructor(LValue Dst)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
const TargetInfo & getTarget() const
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::BasicBlock * getInvokeDest()
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
CGDebugInfo * getDebugInfo()
Address EmitVAListRef(const Expr *E)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Type * ConvertType(QualType T)
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
CodeGenTypes & getTypes() const
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const CallExpr * MustTailCall
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
CallType * addControlledConvergenceToken(CallType *Input)
This class organizes the cross-function state that is used while generating LLVM code.
llvm::MDNode * getNoObjCARCExceptionsMetadata()
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
void addUndefinedGlobalForTailCall(std::pair< const FunctionDecl *, SourceLocation > Global)
ObjCEntrypoints & getObjCEntrypoints() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
bool shouldEmitConvergenceTokens() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD)
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
void valueProfile(CGBuilderTy &Builder, uint32_t ValueKind, llvm::Instruction *ValueSite, llvm::Value *ValuePtr)
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const ABIInfo & getABIInfo() const
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
unsigned getTargetAddressSpace(QualType T) const
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type Ty into its constituent LLVM IR types, written out through the given iterator.
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
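Sketch of the usual pairing with GetFunctionType when lowering a call (CGM, Args, and FnType are assumed to be in scope):
// Arrange the call with the arguments actually being passed...
const CGFunctionInfo &FI =
    CGM.getTypes().arrangeFreeFunctionCall(Args, FnType, /*ChainCall=*/false);
// ...then ask for the LLVM function type matching that arrangement.
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);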
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
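Illustrative use, assuming FT is a const clang::FunctionType* and Fn an llvm::Function* already in scope:
// Map the AST calling convention onto the LLVM one and tag the function with it.
unsigned CC = CGM.getTypes().ClangCallConvToLLVMCallConv(FT->getExtInfo().getCC());
Fn->setCallingConv(static_cast<llvm::CallingConv::ID>(CC));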
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
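A sketch of declaring a void() runtime helper through the nullary arrangement (the helper name "__example_runtime_hook" is purely illustrative):
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
llvm::FunctionCallee Helper = CGM.CreateRuntimeFunction(FTy, "__example_runtime_hook");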
A cleanup scope which generates the cleanup blocks lazily.
EHScopeStack::Cleanup * getCleanup()
Information for lazily generating a cleanup.
virtual bool isRedundantBeforeReturn()
A saved depth on the scope stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator end() const
Returns an iterator pointing to the outermost EH scope.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
bool isVolatileQualified() const
LangAS getAddressSpace() const
CharUnits getAlignment() const
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Qualifiers::ObjCLifetime getObjCLifetime() const
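Sketch of wrapping an Address in an LValue and querying it (Addr, Ty, and CGM are assumed in scope; the default-constructed LValueBaseInfo/TBAAAccessInfo are an assumption standing in for properly computed base and TBAA info):
LValue LV = LValue::MakeAddr(Addr, Ty, CGM.getContext(),
                             LValueBaseInfo(), TBAAAccessInfo());
CharUnits Align = LV.getAlignment();          // alignment recorded in the lvalue
bool IsVolatile = LV.isVolatileQualified();   // volatile-qualified access?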
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddress() - Return the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
bool isVolatileQualified() const
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
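The three RValue flavors in sketch form (V, Real, Imag, and Addr are assumed llvm::Value*/Address values already in scope):
RValue Scalar = RValue::get(V);
llvm::Value *Raw = Scalar.getScalarVal();
RValue Cplx = RValue::getComplex(Real, Imag);
std::pair<llvm::Value *, llvm::Value *> Parts = Cplx.getComplexVal();
RValue Agg = RValue::getAggregate(Addr);              // aggregate results live in memory
Address AggAddr = Agg.getAggregateAddress();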
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
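For example, a variadic prototype called as a member function has one implicit prefix argument ('this') on top of the prototype's own parameters; a sketch, with FPT assumed in scope:
RequiredArgs Req = RequiredArgs::forPrototypePlus(FPT, /*additional=*/1);
unsigned NumRequired = Req.getNumRequiredArgs();   // prototype params + the extra prefix arg
bool IsVariadicCall = Req.allowsOptionalArgs();    // true when trailing args may be added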
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args, QualType ReturnType) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isZeroLengthBitField(const ASTContext &Ctx) const
Is this a zero-length bit-field? Such bit-fields aren't really bit-fields at all and instead act as a...
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isVariadic() const
Whether this function prototype is variadic.
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
const Decl * getDecl() const
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
bool isParamDestroyedInCallee() const
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getDecl() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change,...
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool useObjCFPRetForRealType(FloatModeKind T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
The base class of the type hierarchy.
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
bool isBlockPointerType() const
bool isIncompleteArrayType() const
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
QualType getCanonicalTypeInternal() const
bool isMemberPointerType() const
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
bool isAnyPointerType() const
const T * getAs() const
Member-template getAs<specific type>.
bool isNullPtrType() const
bool isObjCRetainableType() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
const Expr * getSubExpr() const
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Ret(InterpState &S, CodePtr &PC, APValue &Result)
bool This(InterpState &S, CodePtr OpPC)
bool Zero(InterpState &S, CodePtr OpPC)
bool Load(InterpState &S, CodePtr OpPC)
The JSON file list parser is used to communicate input to InstallAPI.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ Result
The result type of a method or function.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_Complete
Complete object dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
const FunctionProtoType * T
CallingConv
CallingConv - Specifies the calling convention that a function uses.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::BasicBlock * getBlock() const
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
LangAS getASTAllocaAddressSpace() const
bool isMSVCXXPersonality() const
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Iterator for iterating over Stmt * arrays that contain only T *.