32#include "llvm/ADT/StringExtras.h"
33#include "llvm/Analysis/ValueTracking.h"
34#include "llvm/IR/Assumptions.h"
35#include "llvm/IR/AttributeMask.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/CallingConv.h"
38#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/InlineAsm.h"
40#include "llvm/IR/IntrinsicInst.h"
41#include "llvm/IR/Intrinsics.h"
42#include "llvm/IR/Type.h"
43#include "llvm/Transforms/Utils/Local.h"
46using namespace CodeGen;
52 default:
return llvm::CallingConv::C;
57 case CC_Win64:
return llvm::CallingConv::Win64;
59 case CC_AAPCS:
return llvm::CallingConv::ARM_AAPCS;
60 case CC_AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
73 case CC_Swift:
return llvm::CallingConv::Swift;
75 case CC_M68kRTD:
return llvm::CallingConv::M68k_RTD;
126 unsigned totalArgs) {
128 assert(paramInfos.size() <= prefixArgs);
129 assert(proto->
getNumParams() + prefixArgs <= totalArgs);
131 paramInfos.reserve(totalArgs);
134 paramInfos.resize(prefixArgs);
138 paramInfos.push_back(ParamInfo);
140 if (ParamInfo.hasPassObjectSize())
141 paramInfos.emplace_back();
144 assert(paramInfos.size() <= totalArgs &&
145 "Did we forget to insert pass_object_size args?");
147 paramInfos.resize(totalArgs);
157 if (!FPT->hasExtParameterInfos()) {
158 assert(paramInfos.empty() &&
159 "We have paramInfos, but the prototype doesn't?");
160 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
164 unsigned PrefixSize = prefix.size();
168 prefix.reserve(prefix.size() + FPT->getNumParams());
170 auto ExtInfos = FPT->getExtParameterInfos();
171 assert(ExtInfos.size() == FPT->getNumParams());
172 for (
unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
173 prefix.push_back(FPT->getParamType(I));
174 if (ExtInfos[I].hasPassObjectSize())
197 FTP->getExtInfo(), paramInfos,
Required);
205 return ::arrangeLLVMFunctionInfo(*
this,
false, argTypes,
215 if (D->
hasAttr<FastCallAttr>())
221 if (D->
hasAttr<ThisCallAttr>())
224 if (D->
hasAttr<VectorCallAttr>())
230 if (PcsAttr *PCS = D->
getAttr<PcsAttr>())
233 if (D->
hasAttr<AArch64VectorPcsAttr>())
236 if (D->
hasAttr<AArch64SVEPcsAttr>())
239 if (D->
hasAttr<AMDGPUKernelCallAttr>())
242 if (D->
hasAttr<IntelOclBiccAttr>())
251 if (D->
hasAttr<PreserveMostAttr>())
254 if (D->
hasAttr<PreserveAllAttr>())
260 if (D->
hasAttr<PreserveNoneAttr>())
281 return ::arrangeLLVMFunctionInfo(
282 *
this,
true, argTypes,
289 if (FD->
hasAttr<CUDAGlobalAttr>()) {
302 assert(!isa<CXXConstructorDecl>(MD) &&
"wrong method for constructors!");
303 assert(!isa<CXXDestructorDecl>(MD) &&
"wrong method for destructors!");
324 !
Target.getCXXABI().hasConstructorVariants();
329 auto *MD = cast<CXXMethodDecl>(GD.
getDecl());
337 bool PassParams =
true;
339 if (
auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
342 if (
auto Inherited = CD->getInheritedConstructor())
354 if (!paramInfos.empty()) {
357 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.
Prefix,
360 paramInfos.append(AddedArgs.
Suffix,
365 (PassParams && MD->isVariadic() ?
RequiredArgs(argTypes.size())
375 argTypes, extInfo, paramInfos, required);
381 for (
auto &arg : args)
389 for (
auto &arg : args)
396 unsigned prefixArgs,
unsigned totalArgs) {
416 unsigned ExtraPrefixArgs,
417 unsigned ExtraSuffixArgs,
418 bool PassProtoArgs) {
421 for (
const auto &Arg : args)
425 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
430 FPT, TotalPrefixArgs + ExtraSuffixArgs)
444 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
451 ArgTypes, Info, ParamInfos,
Required);
459 if (MD->isImplicitObjectMemberFunction())
464 assert(isa<FunctionType>(FTy));
471 std::nullopt, noProto->getExtInfo(), {},
506 I->hasAttr<NoEscapeAttr>());
507 extParamInfos.push_back(extParamInfo);
514 if (
getContext().getLangOpts().ObjCAutoRefCount &&
515 MD->
hasAttr<NSReturnsRetainedAttr>())
541 if (isa<CXXConstructorDecl>(GD.
getDecl()) ||
542 isa<CXXDestructorDecl>(GD.
getDecl()))
555 assert(MD->
isVirtual() &&
"only methods have thunks");
572 ArgTys.push_back(*FTP->param_type_begin());
574 ArgTys.push_back(Context.
IntTy);
589 unsigned numExtraRequiredArgs,
591 assert(args.size() >= numExtraRequiredArgs);
601 if (proto->isVariadic())
604 if (proto->hasExtParameterInfos())
614 cast<FunctionNoProtoType>(fnType))) {
620 for (
const auto &arg : args)
625 paramInfos, required);
637 chainCall ? 1 : 0, chainCall);
666 for (
const auto &Arg : args)
699 unsigned numPrefixArgs) {
700 assert(numPrefixArgs + 1 <= args.size() &&
701 "Emitting a call with less args than the required prefix?");
713 paramInfos, required);
725 assert(signature.
arg_size() <= args.size());
726 if (signature.
arg_size() == args.size())
731 if (!sigParamInfos.empty()) {
732 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
733 paramInfos.resize(args.size());
765 assert(llvm::all_of(argTypes,
769 llvm::FoldingSetNodeID ID;
774 bool isDelegateCall =
777 info, paramInfos, required, resultType, argTypes);
779 void *insertPos =
nullptr;
780 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
788 info, paramInfos, resultType, argTypes, required);
789 FunctionInfos.InsertNode(FI, insertPos);
791 bool inserted = FunctionsBeingProcessed.insert(FI).second;
793 assert(inserted &&
"Recursively being processed?");
796 if (CC == llvm::CallingConv::SPIR_KERNEL) {
814 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() ==
nullptr)
817 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
818 assert(erased &&
"Not in set?");
824 bool chainCall,
bool delegateCall,
830 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
835 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
836 argTypes.size() + 1, paramInfos.size()));
839 FI->CallingConvention = llvmCC;
840 FI->EffectiveCallingConvention = llvmCC;
841 FI->ASTCallingConvention = info.
getCC();
842 FI->InstanceMethod = instanceMethod;
843 FI->ChainCall = chainCall;
844 FI->DelegateCall = delegateCall;
850 FI->Required = required;
853 FI->ArgStruct =
nullptr;
854 FI->ArgStructAlign = 0;
855 FI->NumArgs = argTypes.size();
856 FI->HasExtParameterInfos = !paramInfos.empty();
857 FI->getArgsBuffer()[0].
type = resultType;
858 FI->MaxVectorWidth = 0;
859 for (
unsigned i = 0, e = argTypes.size(); i != e; ++i)
860 FI->getArgsBuffer()[i + 1].
type = argTypes[i];
861 for (
unsigned i = 0, e = paramInfos.size(); i != e; ++i)
862 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
872struct TypeExpansion {
873 enum TypeExpansionKind {
885 const TypeExpansionKind Kind;
887 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
888 virtual ~TypeExpansion() {}
891struct ConstantArrayExpansion : TypeExpansion {
895 ConstantArrayExpansion(
QualType EltTy, uint64_t NumElts)
896 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
897 static bool classof(
const TypeExpansion *TE) {
898 return TE->Kind == TEK_ConstantArray;
902struct RecordExpansion : TypeExpansion {
909 : TypeExpansion(TEK_Record), Bases(
std::move(Bases)),
910 Fields(
std::move(Fields)) {}
911 static bool classof(
const TypeExpansion *TE) {
912 return TE->Kind == TEK_Record;
916struct ComplexExpansion : TypeExpansion {
920 static bool classof(
const TypeExpansion *TE) {
925struct NoExpansion : TypeExpansion {
926 NoExpansion() : TypeExpansion(TEK_None) {}
927 static bool classof(
const TypeExpansion *TE) {
928 return TE->Kind == TEK_None;
933static std::unique_ptr<TypeExpansion>
936 return std::make_unique<ConstantArrayExpansion>(
937 AT->getElementType(), AT->getSize().getZExtValue());
944 "Cannot expand structure with flexible array.");
951 for (
const auto *FD : RD->
fields()) {
952 if (FD->isZeroLengthBitField(Context))
954 assert(!FD->isBitField() &&
955 "Cannot expand structure with bit-field members.");
957 if (UnionSize < FieldSize) {
958 UnionSize = FieldSize;
963 Fields.push_back(LargestFD);
965 if (
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
966 assert(!CXXRD->isDynamicClass() &&
967 "cannot expand vtable pointers in dynamic classes");
968 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
971 for (
const auto *FD : RD->
fields()) {
972 if (FD->isZeroLengthBitField(Context))
974 assert(!FD->isBitField() &&
975 "Cannot expand structure with bit-field members.");
976 Fields.push_back(FD);
979 return std::make_unique<RecordExpansion>(std::move(Bases),
983 return std::make_unique<ComplexExpansion>(CT->getElementType());
985 return std::make_unique<NoExpansion>();
990 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
993 if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
995 for (
auto BS : RExp->Bases)
997 for (
auto FD : RExp->Fields)
1001 if (isa<ComplexExpansion>(Exp.get()))
1003 assert(isa<NoExpansion>(Exp.get()));
1011 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1012 for (
int i = 0, n = CAExp->NumElts; i < n; i++) {
1015 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1016 for (
auto BS : RExp->Bases)
1018 for (
auto FD : RExp->Fields)
1020 }
else if (
auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1025 assert(isa<NoExpansion>(Exp.get()));
1031 ConstantArrayExpansion *CAE,
1033 llvm::function_ref<
void(
Address)> Fn) {
1039 for (
int i = 0, n = CAE->NumElts; i < n; i++) {
1040 llvm::Value *EltAddr = CGF.
Builder.CreateConstGEP2_32(
1042 Fn(
Address(EltAddr, EltTy, EltAlign));
1047 llvm::Function::arg_iterator &AI) {
1049 "Unexpected non-simple lvalue during struct expansion.");
1052 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1055 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1056 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1058 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1068 ExpandTypeFromArgs(BS->
getType(), SubLV, AI);
1070 for (
auto FD : RExp->Fields) {
1073 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1075 }
else if (isa<ComplexExpansion>(Exp.get())) {
1076 auto realValue = &*AI++;
1077 auto imagValue = &*AI++;
1082 assert(isa<NoExpansion>(Exp.get()));
1083 llvm::Value *Arg = &*AI++;
1090 if (Arg->getType()->isPointerTy()) {
1099void CodeGenFunction::ExpandTypeToArgs(
1103 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1107 *
this, CAExp, Addr, [&](
Address EltAddr) {
1111 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1114 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1125 ExpandTypeToArgs(BS->
getType(), BaseArg, IRFuncTy, IRCallArgs,
1130 for (
auto FD : RExp->Fields) {
1133 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1136 }
else if (isa<ComplexExpansion>(Exp.get())) {
1138 IRCallArgs[IRCallArgPos++] = CV.first;
1139 IRCallArgs[IRCallArgPos++] = CV.second;
1141 assert(isa<NoExpansion>(Exp.get()));
1143 assert(RV.isScalar() &&
1144 "Unexpected non-scalar rvalue during struct expansion.");
1147 llvm::Value *
V = RV.getScalarVal();
1148 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1149 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1150 V =
Builder.CreateBitCast(
V, IRFuncTy->getParamType(IRCallArgPos));
1152 IRCallArgs[IRCallArgPos++] =
V;
1159 const Twine &Name =
"tmp") {
1173 llvm::StructType *SrcSTy,
1176 if (SrcSTy->getNumElements() == 0)
return SrcPtr;
1184 uint64_t FirstEltSize =
1186 if (FirstEltSize < DstSize &&
1195 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1211 if (Val->getType() == Ty)
1214 if (isa<llvm::PointerType>(Val->getType())) {
1216 if (isa<llvm::PointerType>(Ty))
1217 return CGF.
Builder.CreateBitCast(Val, Ty,
"coerce.val");
1223 llvm::Type *DestIntTy = Ty;
1224 if (isa<llvm::PointerType>(DestIntTy))
1227 if (Val->getType() != DestIntTy) {
1229 if (DL.isBigEndian()) {
1232 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1233 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1235 if (SrcSize > DstSize) {
1236 Val = CGF.
Builder.CreateLShr(Val, SrcSize - DstSize,
"coerce.highbits");
1237 Val = CGF.
Builder.CreateTrunc(Val, DestIntTy,
"coerce.val.ii");
1239 Val = CGF.
Builder.CreateZExt(Val, DestIntTy,
"coerce.val.ii");
1240 Val = CGF.
Builder.CreateShl(Val, DstSize - SrcSize,
"coerce.highbits");
1244 Val = CGF.
Builder.CreateIntCast(Val, DestIntTy,
false,
"coerce.val.ii");
1248 if (isa<llvm::PointerType>(Ty))
1249 Val = CGF.
Builder.CreateIntToPtr(Val, Ty,
"coerce.val.ip");
1272 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1274 DstSize.getFixedValue(), CGF);
1282 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1283 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1289 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1290 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1304 if (
auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1305 if (
auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1308 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
1309 ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
1310 FixedSrcTy->getElementType()->isIntegerTy(8)) {
1311 ScalableDstTy = llvm::ScalableVectorType::get(
1312 FixedSrcTy->getElementType(),
1313 ScalableDstTy->getElementCount().getKnownMinValue() / 8);
1315 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
1317 auto *UndefVec = llvm::UndefValue::get(ScalableDstTy);
1318 auto *Zero = llvm::Constant::getNullValue(CGF.
CGM.
Int64Ty);
1320 ScalableDstTy, UndefVec, Load, Zero,
"cast.scalable");
1321 if (ScalableDstTy != Ty)
1334 llvm::ConstantInt::get(CGF.
IntPtrTy, SrcSize.getKnownMinValue()));
1343 bool DestIsVolatile) {
1345 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1346 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1348 llvm::Value *Elt =
Builder.CreateExtractValue(Val, i);
1366 llvm::Type *SrcTy = Src->getType();
1368 if (SrcTy == DstTy) {
1375 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1377 SrcSize.getFixedValue(), CGF);
1381 llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1382 llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1383 if (SrcPtrTy && DstPtrTy &&
1384 SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1392 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1393 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1402 if (isa<llvm::ScalableVectorType>(SrcTy) ||
1403 isa<llvm::ScalableVectorType>(DstTy) ||
1404 SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
1422 llvm::ConstantInt::get(CGF.
IntPtrTy, DstSize.getFixedValue()));
1441class ClangToLLVMArgMapping {
1442 static const unsigned InvalidIndex = ~0
U;
1443 unsigned InallocaArgNo;
1445 unsigned TotalIRArgs;
1449 unsigned PaddingArgIndex;
1452 unsigned FirstArgIndex;
1453 unsigned NumberOfArgs;
1456 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1464 bool OnlyRequiredArgs =
false)
1465 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1466 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1467 construct(Context, FI, OnlyRequiredArgs);
1470 bool hasInallocaArg()
const {
return InallocaArgNo != InvalidIndex; }
1471 unsigned getInallocaArgNo()
const {
1472 assert(hasInallocaArg());
1473 return InallocaArgNo;
1476 bool hasSRetArg()
const {
return SRetArgNo != InvalidIndex; }
1477 unsigned getSRetArgNo()
const {
1478 assert(hasSRetArg());
1482 unsigned totalIRArgs()
const {
return TotalIRArgs; }
1484 bool hasPaddingArg(
unsigned ArgNo)
const {
1485 assert(ArgNo < ArgInfo.size());
1486 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1488 unsigned getPaddingArgNo(
unsigned ArgNo)
const {
1489 assert(hasPaddingArg(ArgNo));
1490 return ArgInfo[ArgNo].PaddingArgIndex;
1495 std::pair<unsigned, unsigned> getIRArgs(
unsigned ArgNo)
const {
1496 assert(ArgNo < ArgInfo.size());
1497 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1498 ArgInfo[ArgNo].NumberOfArgs);
1503 bool OnlyRequiredArgs);
1506void ClangToLLVMArgMapping::construct(
const ASTContext &Context,
1508 bool OnlyRequiredArgs) {
1509 unsigned IRArgNo = 0;
1510 bool SwapThisWithSRet =
false;
1515 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1526 auto &IRArgs = ArgInfo[ArgNo];
1529 IRArgs.PaddingArgIndex = IRArgNo++;
1535 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.
getCoerceToType());
1537 IRArgs.NumberOfArgs = STy->getNumElements();
1539 IRArgs.NumberOfArgs = 1;
1545 IRArgs.NumberOfArgs = 1;
1550 IRArgs.NumberOfArgs = 0;
1560 if (IRArgs.NumberOfArgs > 0) {
1561 IRArgs.FirstArgIndex = IRArgNo;
1562 IRArgNo += IRArgs.NumberOfArgs;
1567 if (IRArgNo == 1 && SwapThisWithSRet)
1570 assert(ArgNo == ArgInfo.size());
1573 InallocaArgNo = IRArgNo++;
1575 TotalIRArgs = IRArgNo;
1583 return RI.
isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1593 switch (BT->getKind()) {
1596 case BuiltinType::Float:
1598 case BuiltinType::Double:
1600 case BuiltinType::LongDouble:
1611 if (BT->getKind() == BuiltinType::LongDouble)
1627 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1629 assert(Inserted &&
"Recursively being processed?");
1631 llvm::Type *resultType =
nullptr;
1636 llvm_unreachable(
"Invalid ABI kind for return argument");
1648 resultType = llvm::PointerType::get(
getLLVMContext(), addressSpace);
1664 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI,
true);
1668 if (IRFunctionArgs.hasSRetArg()) {
1671 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1676 if (IRFunctionArgs.hasInallocaArg())
1677 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1684 for (; it != ie; ++it, ++ArgNo) {
1688 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1689 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1692 unsigned FirstIRArg, NumIRArgs;
1693 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1698 assert(NumIRArgs == 0);
1702 assert(NumIRArgs == 1);
1704 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1708 assert(NumIRArgs == 1);
1709 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1717 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1719 assert(NumIRArgs == st->getNumElements());
1720 for (
unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1721 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1723 assert(NumIRArgs == 1);
1724 ArgTypes[FirstIRArg] = argType;
1730 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1732 *ArgTypesIter++ = EltTy;
1734 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1739 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1741 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1746 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1747 assert(Erased &&
"Not in set?");
1749 return llvm::FunctionType::get(resultType, ArgTypes, FI.
isVariadic());
1763 llvm::AttrBuilder &FuncAttrs,
1770 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1774 FuncAttrs.addAttribute(
"aarch64_pstate_sm_enabled");
1776 FuncAttrs.addAttribute(
"aarch64_pstate_sm_compatible");
1780 FuncAttrs.addAttribute(
"aarch64_preserves_za");
1782 FuncAttrs.addAttribute(
"aarch64_in_za");
1784 FuncAttrs.addAttribute(
"aarch64_out_za");
1786 FuncAttrs.addAttribute(
"aarch64_inout_za");
1790 FuncAttrs.addAttribute(
"aarch64_preserves_zt0");
1792 FuncAttrs.addAttribute(
"aarch64_in_zt0");
1794 FuncAttrs.addAttribute(
"aarch64_out_zt0");
1796 FuncAttrs.addAttribute(
"aarch64_inout_zt0");
1800 const Decl *Callee) {
1806 for (
const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
1807 AA->getAssumption().split(Attrs,
",");
1810 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1811 llvm::join(Attrs.begin(), Attrs.end(),
","));
1820 if (
const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1821 return ClassDecl->hasTrivialDestructor();
1827 const Decl *TargetDecl) {
1833 if (
Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
1837 if (!
Module.getLangOpts().CPlusPlus)
1840 if (
const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
1841 if (FDecl->isExternC())
1843 }
else if (
const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
1845 if (VDecl->isExternC())
1853 return Module.getCodeGenOpts().StrictReturn ||
1854 !
Module.MayDropFunctionReturn(
Module.getContext(), RetTy) ||
1855 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
1862 llvm::DenormalMode FP32DenormalMode,
1863 llvm::AttrBuilder &FuncAttrs) {
1864 if (FPDenormalMode != llvm::DenormalMode::getDefault())
1865 FuncAttrs.addAttribute(
"denormal-fp-math", FPDenormalMode.str());
1867 if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
1868 FuncAttrs.addAttribute(
"denormal-fp-math-f32", FP32DenormalMode.str());
1876 llvm::AttrBuilder &FuncAttrs) {
1882 StringRef Name,
bool HasOptnone,
const CodeGenOptions &CodeGenOpts,
1884 llvm::AttrBuilder &FuncAttrs) {
1887 if (CodeGenOpts.OptimizeSize)
1888 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1889 if (CodeGenOpts.OptimizeSize == 2)
1890 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1893 if (CodeGenOpts.DisableRedZone)
1894 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1895 if (CodeGenOpts.IndirectTlsSegRefs)
1896 FuncAttrs.addAttribute(
"indirect-tls-seg-refs");
1897 if (CodeGenOpts.NoImplicitFloat)
1898 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1900 if (AttrOnCallSite) {
1905 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1907 FuncAttrs.addAttribute(
"trap-func-name", CodeGenOpts.
TrapFuncName);
1909 switch (CodeGenOpts.getFramePointer()) {
1915 FuncAttrs.addAttribute(
"frame-pointer",
1917 CodeGenOpts.getFramePointer()));
1920 if (CodeGenOpts.LessPreciseFPMAD)
1921 FuncAttrs.addAttribute(
"less-precise-fpmad",
"true");
1923 if (CodeGenOpts.NullPointerIsValid)
1924 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1927 FuncAttrs.addAttribute(
"no-trapping-math",
"true");
1931 if (LangOpts.NoHonorInfs)
1932 FuncAttrs.addAttribute(
"no-infs-fp-math",
"true");
1933 if (LangOpts.NoHonorNaNs)
1934 FuncAttrs.addAttribute(
"no-nans-fp-math",
"true");
1935 if (LangOpts.ApproxFunc)
1936 FuncAttrs.addAttribute(
"approx-func-fp-math",
"true");
1937 if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
1938 LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
1939 (LangOpts.getDefaultFPContractMode() ==
1941 LangOpts.getDefaultFPContractMode() ==
1943 FuncAttrs.addAttribute(
"unsafe-fp-math",
"true");
1944 if (CodeGenOpts.SoftFloat)
1945 FuncAttrs.addAttribute(
"use-soft-float",
"true");
1946 FuncAttrs.addAttribute(
"stack-protector-buffer-size",
1947 llvm::utostr(CodeGenOpts.SSPBufferSize));
1948 if (LangOpts.NoSignedZero)
1949 FuncAttrs.addAttribute(
"no-signed-zeros-fp-math",
"true");
1952 const std::vector<std::string> &Recips = CodeGenOpts.
Reciprocals;
1953 if (!Recips.empty())
1954 FuncAttrs.addAttribute(
"reciprocal-estimates",
1955 llvm::join(Recips,
","));
1959 FuncAttrs.addAttribute(
"prefer-vector-width",
1962 if (CodeGenOpts.StackRealignment)
1963 FuncAttrs.addAttribute(
"stackrealign");
1964 if (CodeGenOpts.Backchain)
1965 FuncAttrs.addAttribute(
"backchain");
1966 if (CodeGenOpts.EnableSegmentedStacks)
1967 FuncAttrs.addAttribute(
"split-stack");
1969 if (CodeGenOpts.SpeculativeLoadHardening)
1970 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1973 switch (CodeGenOpts.getZeroCallUsedRegs()) {
1974 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
1975 FuncAttrs.removeAttribute(
"zero-call-used-regs");
1977 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
1978 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr-arg");
1980 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
1981 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr");
1983 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
1984 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-arg");
1986 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
1987 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used");
1989 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
1990 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr-arg");
1992 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
1993 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr");
1995 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
1996 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-arg");
1998 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
1999 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all");
2010 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2015 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
2016 LangOpts.SYCLIsDevice) {
2017 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2021 StringRef Var,
Value;
2023 FuncAttrs.addAttribute(Var,
Value);
2034 const llvm::Function &F,
2036 auto FFeatures = F.getFnAttribute(
"target-features");
2038 llvm::StringSet<> MergedNames;
2040 MergedFeatures.reserve(TargetOpts.
Features.size());
2042 auto AddUnmergedFeatures = [&](
auto &&FeatureRange) {
2043 for (StringRef Feature : FeatureRange) {
2044 if (Feature.empty())
2046 assert(Feature[0] ==
'+' || Feature[0] ==
'-');
2047 StringRef Name = Feature.drop_front(1);
2048 bool Merged = !MergedNames.insert(Name).second;
2050 MergedFeatures.push_back(Feature);
2054 if (FFeatures.isValid())
2055 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(),
','));
2056 AddUnmergedFeatures(TargetOpts.
Features);
2058 if (!MergedFeatures.empty()) {
2059 llvm::sort(MergedFeatures);
2060 FuncAttr.addAttribute(
"target-features", llvm::join(MergedFeatures,
","));
2067 bool WillInternalize) {
2069 llvm::AttrBuilder FuncAttrs(F.getContext());
2072 if (!TargetOpts.
CPU.empty())
2073 FuncAttrs.addAttribute(
"target-cpu", TargetOpts.
CPU);
2074 if (!TargetOpts.
TuneCPU.empty())
2075 FuncAttrs.addAttribute(
"tune-cpu", TargetOpts.
TuneCPU);
2078 CodeGenOpts, LangOpts,
2081 if (!WillInternalize && F.isInterposable()) {
2086 F.addFnAttrs(FuncAttrs);
2090 llvm::AttributeMask AttrsToRemove;
2092 llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
2093 llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
2094 llvm::DenormalMode Merged =
2098 if (DenormModeToMergeF32.isValid()) {
2103 if (Merged == llvm::DenormalMode::getDefault()) {
2104 AttrsToRemove.addAttribute(
"denormal-fp-math");
2105 }
else if (Merged != DenormModeToMerge) {
2107 FuncAttrs.addAttribute(
"denormal-fp-math",
2111 if (MergedF32 == llvm::DenormalMode::getDefault()) {
2112 AttrsToRemove.addAttribute(
"denormal-fp-math-f32");
2113 }
else if (MergedF32 != DenormModeToMergeF32) {
2115 FuncAttrs.addAttribute(
"denormal-fp-math-f32",
2119 F.removeFnAttrs(AttrsToRemove);
2124 F.addFnAttrs(FuncAttrs);
2127void CodeGenModule::getTrivialDefaultFunctionAttributes(
2128 StringRef Name,
bool HasOptnone,
bool AttrOnCallSite,
2129 llvm::AttrBuilder &FuncAttrs) {
2130 ::getTrivialDefaultFunctionAttributes(Name, HasOptnone,
getCodeGenOpts(),
2135void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2137 bool AttrOnCallSite,
2138 llvm::AttrBuilder &FuncAttrs) {
2139 getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
2143 if (!AttrOnCallSite)
2148 llvm::AttrBuilder &attrs) {
2149 getDefaultFunctionAttributes(
"",
false,
2151 GetCPUAndFeaturesAttributes(
GlobalDecl(), attrs);
2156 const NoBuiltinAttr *NBA =
nullptr) {
2157 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2159 AttributeName +=
"no-builtin-";
2160 AttributeName += BuiltinName;
2161 FuncAttrs.addAttribute(AttributeName);
2165 if (LangOpts.NoBuiltin) {
2167 FuncAttrs.addAttribute(
"no-builtins");
2181 if (llvm::is_contained(NBA->builtinNames(),
"*")) {
2182 FuncAttrs.addAttribute(
"no-builtins");
2187 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2191 const llvm::DataLayout &DL,
const ABIArgInfo &AI,
2192 bool CheckCoerce =
true) {
2193 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
2199 if (!DL.typeSizeEqualsStoreSize(Ty))
2206 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2207 DL.getTypeSizeInBits(Ty)))
2231 if (
const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2233 if (
const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2242 unsigned NumRequiredArgs,
unsigned ArgNo) {
2243 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2248 if (ArgNo >= NumRequiredArgs)
2252 if (ArgNo < FD->getNumParams()) {
2253 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2254 if (Param && Param->
hasAttr<MaybeUndefAttr>())
2271 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2274 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2276 llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
2277 return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
2286 llvm::FPClassTest Mask = llvm::fcNone;
2287 if (LangOpts.NoHonorInfs)
2288 Mask |= llvm::fcInf;
2289 if (LangOpts.NoHonorNaNs)
2290 Mask |= llvm::fcNan;
2296 llvm::AttributeList &Attrs) {
2297 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2298 Attrs = Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Memory);
2299 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
2325 llvm::AttributeList &AttrList,
2327 bool AttrOnCallSite,
bool IsThunk) {
2335 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2337 FuncAttrs.addAttribute(
"cmse_nonsecure_call");
2349 bool HasOptnone =
false;
2351 const NoBuiltinAttr *NBA =
nullptr;
2355 auto AddPotentialArgAccess = [&]() {
2356 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2358 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2359 llvm::MemoryEffects::argMemOnly());
2366 if (TargetDecl->
hasAttr<ReturnsTwiceAttr>())
2367 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2368 if (TargetDecl->
hasAttr<NoThrowAttr>())
2369 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2370 if (TargetDecl->
hasAttr<NoReturnAttr>())
2371 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2372 if (TargetDecl->
hasAttr<ColdAttr>())
2373 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2374 if (TargetDecl->
hasAttr<HotAttr>())
2375 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2376 if (TargetDecl->
hasAttr<NoDuplicateAttr>())
2377 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2378 if (TargetDecl->
hasAttr<ConvergentAttr>())
2379 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2381 if (
const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2384 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2386 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2388 (Kind == OO_New || Kind == OO_Array_New))
2389 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2392 const bool IsVirtualCall = MD && MD->
isVirtual();
2395 if (!(AttrOnCallSite && IsVirtualCall)) {
2396 if (Fn->isNoReturn())
2397 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2398 NBA = Fn->getAttr<NoBuiltinAttr>();
2402 if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
2405 if (AttrOnCallSite && TargetDecl->
hasAttr<NoMergeAttr>())
2406 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2410 if (TargetDecl->
hasAttr<ConstAttr>()) {
2411 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2412 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2415 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2416 }
else if (TargetDecl->
hasAttr<PureAttr>()) {
2417 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2418 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2420 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2421 }
else if (TargetDecl->
hasAttr<NoAliasAttr>()) {
2422 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2423 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2425 if (TargetDecl->
hasAttr<RestrictAttr>())
2426 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2427 if (TargetDecl->
hasAttr<ReturnsNonNullAttr>() &&
2428 !CodeGenOpts.NullPointerIsValid)
2429 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2430 if (TargetDecl->
hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2431 FuncAttrs.addAttribute(
"no_caller_saved_registers");
2432 if (TargetDecl->
hasAttr<AnyX86NoCfCheckAttr>())
2433 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2434 if (TargetDecl->
hasAttr<LeafAttr>())
2435 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2437 HasOptnone = TargetDecl->
hasAttr<OptimizeNoneAttr>();
2438 if (
auto *AllocSize = TargetDecl->
getAttr<AllocSizeAttr>()) {
2439 std::optional<unsigned> NumElemsParam;
2440 if (AllocSize->getNumElemsParam().isValid())
2441 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2442 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2446 if (TargetDecl->
hasAttr<OpenCLKernelAttr>()) {
2449 FuncAttrs.addAttribute(
"uniform-work-group-size",
"true");
2456 FuncAttrs.addAttribute(
2457 "uniform-work-group-size",
2458 llvm::toStringRef(
getLangOpts().OffloadUniformBlock));
2462 if (TargetDecl->
hasAttr<CUDAGlobalAttr>() &&
2464 FuncAttrs.addAttribute(
"uniform-work-group-size",
"true");
2466 if (TargetDecl->
hasAttr<ArmLocallyStreamingAttr>())
2467 FuncAttrs.addAttribute(
"aarch64_pstate_sm_body");
2479 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2484 if (TargetDecl->
hasAttr<NoSpeculativeLoadHardeningAttr>())
2485 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2486 if (TargetDecl->
hasAttr<SpeculativeLoadHardeningAttr>())
2487 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2488 if (TargetDecl->
hasAttr<NoSplitStackAttr>())
2489 FuncAttrs.removeAttribute(
"split-stack");
2490 if (TargetDecl->
hasAttr<ZeroCallUsedRegsAttr>()) {
2493 TargetDecl->
getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2494 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2495 FuncAttrs.addAttribute(
2496 "zero-call-used-regs",
2497 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2504 if (CodeGenOpts.NoPLT) {
2505 if (
auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2506 if (!Fn->isDefined() && !AttrOnCallSite) {
2507 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2515 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2516 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2517 if (!FD->isExternallyVisible())
2518 FuncAttrs.addAttribute(
"sample-profile-suffix-elision-policy",
2525 if (!AttrOnCallSite) {
2526 if (TargetDecl && TargetDecl->
hasAttr<CmseNSEntryAttr>())
2527 FuncAttrs.addAttribute(
"cmse_nonsecure_entry");
2530 auto shouldDisableTailCalls = [&] {
2532 if (CodeGenOpts.DisableTailCalls)
2538 if (TargetDecl->
hasAttr<DisableTailCallsAttr>() ||
2539 TargetDecl->
hasAttr<AnyX86InterruptAttr>())
2542 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2543 if (
const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2544 if (!BD->doesNotEscape())
2550 if (shouldDisableTailCalls())
2551 FuncAttrs.addAttribute(
"disable-tail-calls",
"true");
2555 GetCPUAndFeaturesAttributes(CalleeInfo.
getCalleeDecl(), FuncAttrs);
2559 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI);
2566 if (CodeGenOpts.EnableNoundefAttrs &&
2570 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2576 RetAttrs.addAttribute(llvm::Attribute::SExt);
2578 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2582 RetAttrs.addAttribute(llvm::Attribute::InReg);
2594 AddPotentialArgAccess();
2603 llvm_unreachable(
"Invalid ABI kind for return argument");
2611 RetAttrs.addDereferenceableAttr(
2613 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2614 !CodeGenOpts.NullPointerIsValid)
2615 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2617 llvm::Align Alignment =
2619 RetAttrs.addAlignmentAttr(Alignment);
2624 bool hasUsedSRet =
false;
2628 if (IRFunctionArgs.hasSRetArg()) {
2630 SRETAttrs.addStructRetAttr(
getTypes().ConvertTypeForMem(RetTy));
2631 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2632 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2635 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2637 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2642 if (IRFunctionArgs.hasInallocaArg()) {
2645 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2654 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2656 assert(IRArgs.second == 1 &&
"Expected only a single `this` pointer.");
2663 if (!CodeGenOpts.NullPointerIsValid &&
2665 Attrs.addAttribute(llvm::Attribute::NonNull);
2672 Attrs.addDereferenceableOrNullAttr(
2678 llvm::Align Alignment =
2682 Attrs.addAlignmentAttr(Alignment);
2684 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(
getLLVMContext(), Attrs);
2690 I != E; ++I, ++ArgNo) {
2696 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2698 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2699 llvm::AttributeSet::get(
2701 llvm::AttrBuilder(
getLLVMContext()).addAttribute(llvm::Attribute::InReg));
2706 if (CodeGenOpts.EnableNoundefAttrs &&
2708 Attrs.addAttribute(llvm::Attribute::NoUndef);
2717 Attrs.addAttribute(llvm::Attribute::SExt);
2719 Attrs.addAttribute(llvm::Attribute::ZExt);
2723 Attrs.addAttribute(llvm::Attribute::Nest);
2725 Attrs.addAttribute(llvm::Attribute::InReg);
2726 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.
getDirectAlign()));
2733 Attrs.addAttribute(llvm::Attribute::InReg);
2736 Attrs.addByValAttr(
getTypes().ConvertTypeForMem(ParamType));
2739 if (CodeGenOpts.PassByValueIsNoAlias &&
Decl &&
2740 Decl->getArgPassingRestrictions() ==
2744 Attrs.addAttribute(llvm::Attribute::NoAlias);
2769 AddPotentialArgAccess();
2774 Attrs.addByRefAttr(
getTypes().ConvertTypeForMem(ParamType));
2785 AddPotentialArgAccess();
2792 Attrs.addDereferenceableAttr(
2794 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2795 !CodeGenOpts.NullPointerIsValid)
2796 Attrs.addAttribute(llvm::Attribute::NonNull);
2798 llvm::Align Alignment =
2800 Attrs.addAlignmentAttr(Alignment);
2808 if (TargetDecl && TargetDecl->
hasAttr<OpenCLKernelAttr>() &&
2812 llvm::Align Alignment =
2814 Attrs.addAlignmentAttr(Alignment);
2826 Attrs.addStructRetAttr(
getTypes().ConvertTypeForMem(ParamType));
2831 Attrs.addAttribute(llvm::Attribute::NoAlias);
2835 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2837 Attrs.addDereferenceableAttr(info.Width.getQuantity());
2838 Attrs.addAlignmentAttr(info.Align.getAsAlign());
2844 Attrs.addAttribute(llvm::Attribute::SwiftError);
2848 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2852 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
2857 Attrs.addAttribute(llvm::Attribute::NoCapture);
2859 if (Attrs.hasAttributes()) {
2860 unsigned FirstIRArg, NumIRArgs;
2861 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2862 for (
unsigned i = 0; i < NumIRArgs; i++)
2863 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
2869 AttrList = llvm::AttributeList::get(
2878 llvm::Value *value) {
2883 if (value->getType() == varType)
return value;
2885 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2886 &&
"unexpected promotion type");
2888 if (isa<llvm::IntegerType>(varType))
2889 return CGF.
Builder.CreateTrunc(value, varType,
"arg.unpromote");
2891 return CGF.
Builder.CreateFPCast(value, varType,
"arg.unpromote");
2897 QualType ArgType,
unsigned ArgNo) {
2909 if (
auto ParmNNAttr = PVD->
getAttr<NonNullAttr>())
2916 if (NNAttr->isNonNull(ArgNo))
2946 if (FD->hasImplicitReturnZero()) {
2947 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2949 llvm::Constant*
Zero = llvm::Constant::getNullValue(LLVMTy);
2958 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2963 if (IRFunctionArgs.hasInallocaArg())
2964 ArgStruct =
Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2968 if (IRFunctionArgs.hasSRetArg()) {
2969 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2970 AI->setName(
"agg.result");
2971 AI->addAttr(llvm::Attribute::NoAlias);
2978 ArgVals.reserve(Args.size());
2984 assert(FI.
arg_size() == Args.size() &&
2985 "Mismatch between function signature & arguments.");
2988 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2989 i != e; ++i, ++info_it, ++ArgNo) {
2994 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
3002 unsigned FirstIRArg, NumIRArgs;
3003 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3007 assert(NumIRArgs == 0);
3020 assert(NumIRArgs == 1);
3060 auto AI = Fn->getArg(FirstIRArg);
3068 assert(NumIRArgs == 1);
3070 if (
const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
3073 PVD->getFunctionScopeIndex()) &&
3075 AI->addAttr(llvm::Attribute::NonNull);
3077 QualType OTy = PVD->getOriginalType();
3078 if (
const auto *ArrTy =
3085 QualType ETy = ArrTy->getElementType();
3086 llvm::Align Alignment =
3088 AI->addAttrs(llvm::AttrBuilder(
getLLVMContext()).addAlignmentAttr(Alignment));
3089 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
3093 Attrs.addDereferenceableAttr(
3094 getContext().getTypeSizeInChars(ETy).getQuantity() *
3096 AI->addAttrs(Attrs);
3097 }
else if (
getContext().getTargetInfo().getNullPointerValue(
3100 AI->addAttr(llvm::Attribute::NonNull);
3103 }
else if (
const auto *ArrTy =
3109 QualType ETy = ArrTy->getElementType();
3110 llvm::Align Alignment =
3112 AI->addAttrs(llvm::AttrBuilder(
getLLVMContext()).addAlignmentAttr(Alignment));
3113 if (!
getTypes().getTargetAddressSpace(ETy) &&
3115 AI->addAttr(llvm::Attribute::NonNull);
3120 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3123 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3124 if (AVAttr && !
SanOpts.
has(SanitizerKind::Alignment)) {
3128 llvm::ConstantInt *AlignmentCI =
3131 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3132 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3133 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3134 AI->addAttrs(llvm::AttrBuilder(
getLLVMContext()).addAlignmentAttr(
3135 llvm::Align(AlignmentInt)));
3142 AI->addAttr(llvm::Attribute::NoAlias);
3150 assert(NumIRArgs == 1);
3154 llvm::Value *
V = AI;
3162 getContext().getTypeAlignInChars(pointeeTy));
3185 if (
V->getType() != LTy)
3196 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(
ConvertType(Ty))) {
3197 llvm::Value *Coerced = Fn->getArg(FirstIRArg);
3198 if (
auto *VecTyFrom =
3199 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
3202 if (VecTyFrom->getElementType()->isIntegerTy(1) &&
3203 VecTyFrom->getElementCount().isKnownMultipleOf(8) &&
3204 VecTyTo->getElementType() ==
Builder.getInt8Ty()) {
3205 VecTyFrom = llvm::ScalableVectorType::get(
3206 VecTyTo->getElementType(),
3207 VecTyFrom->getElementCount().getKnownMinValue() / 8);
3208 Coerced =
Builder.CreateBitCast(Coerced, VecTyFrom);
3210 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
3213 assert(NumIRArgs == 1);
3214 Coerced->setName(Arg->
getName() +
".coerce");
3216 VecTyTo, Coerced, Zero,
"cast.fixed")));
3222 llvm::StructType *STy =
3225 STy->getNumElements() > 1) {
3226 [[maybe_unused]] llvm::TypeSize StructSize =
3228 [[maybe_unused]] llvm::TypeSize PtrElementSize =
3230 if (STy->containsHomogeneousScalableVectorTypes()) {
3231 assert(StructSize == PtrElementSize &&
3232 "Only allow non-fractional movement of structure with"
3233 "homogeneous scalable vector type");
3249 STy->getNumElements() > 1) {
3251 llvm::TypeSize PtrElementSize =
3253 if (StructSize.isScalable()) {
3254 assert(STy->containsHomogeneousScalableVectorTypes() &&
3255 "ABI only supports structure with homogeneous scalable vector "
3257 assert(StructSize == PtrElementSize &&
3258 "Only allow non-fractional movement of structure with"
3259 "homogeneous scalable vector type");
3260 assert(STy->getNumElements() == NumIRArgs);
3262 llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
3263 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3264 auto *AI = Fn->getArg(FirstIRArg + i);
3265 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3267 Builder.CreateInsertValue(LoadedStructValue, AI, i);
3272 uint64_t SrcSize = StructSize.getFixedValue();
3273 uint64_t DstSize = PtrElementSize.getFixedValue();
3276 if (SrcSize <= DstSize) {
3283 assert(STy->getNumElements() == NumIRArgs);
3284 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3285 auto AI = Fn->getArg(FirstIRArg + i);
3286 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3291 if (SrcSize > DstSize) {
3297 assert(NumIRArgs == 1);
3298 auto AI = Fn->getArg(FirstIRArg);
3299 AI->setName(Arg->
getName() +
".coerce");
3324 unsigned argIndex = FirstIRArg;
3325 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3326 llvm::Type *eltType = coercionType->getElementType(i);
3331 auto elt = Fn->getArg(argIndex++);
3334 assert(argIndex == FirstIRArg + NumIRArgs);
3346 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3347 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3348 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3349 for (
unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3350 auto AI = Fn->getArg(FirstIRArg + i);
3351 AI->setName(Arg->
getName() +
"." + Twine(i));
3357 assert(NumIRArgs == 0);
3369 if (
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3370 for (
int I = Args.size() - 1; I >= 0; --I)
3373 for (
unsigned I = 0, E = Args.size(); I != E; ++I)
3379 while (insn->use_empty()) {
3380 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3381 if (!bitcast)
return;
3384 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3385 bitcast->eraseFromParent();
3391 llvm::Value *result) {
3393 llvm::BasicBlock *BB = CGF.
Builder.GetInsertBlock();
3394 if (BB->empty())
return nullptr;
3395 if (&BB->back() != result)
return nullptr;
3397 llvm::Type *resultType = result->getType();
3400 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3406 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3409 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3412 if (generator->getNextNode() != bitcast)
3415 InstsToKill.push_back(bitcast);
3422 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3423 if (!call)
return nullptr;
3425 bool doRetainAutorelease;
3428 doRetainAutorelease =
true;
3429 }
else if (call->getCalledOperand() ==
3431 doRetainAutorelease =
false;
3439 llvm::Instruction *prev = call->getPrevNode();
3441 if (isa<llvm::BitCastInst>(prev)) {
3442 prev = prev->getPrevNode();
3445 assert(isa<llvm::CallInst>(prev));
3446 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3448 InstsToKill.push_back(prev);
3454 result = call->getArgOperand(0);
3455 InstsToKill.push_back(call);
3459 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3460 if (!bitcast->hasOneUse())
break;
3461 InstsToKill.push_back(bitcast);
3462 result = bitcast->getOperand(0);
3466 for (
auto *I : InstsToKill)
3467 I->eraseFromParent();
3470 if (doRetainAutorelease)
3474 return CGF.
Builder.CreateBitCast(result, resultType);
3479 llvm::Value *result) {
3482 dyn_cast_or_null<ObjCMethodDecl>(CGF.
CurCodeDecl);
3483 if (!method)
return nullptr;
3489 llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
3490 if (!retainCall || retainCall->getCalledOperand() !=
3495 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3496 llvm::LoadInst *load =
3497 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3498 if (!load || load->isAtomic() || load->isVolatile() ||
3505 llvm::Type *resultType = result->getType();
3507 assert(retainCall->use_empty());
3508 retainCall->eraseFromParent();
3511 return CGF.
Builder.CreateBitCast(load, resultType);
3518 llvm::Value *result) {
3539 auto GetStoreIfValid = [&CGF](llvm::User *
U) -> llvm::StoreInst * {
3540 auto *SI = dyn_cast<llvm::StoreInst>(
U);
3547 assert(!SI->isAtomic() &&
3556 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3557 if (IP->empty())
return nullptr;
3561 for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
3562 if (isa<llvm::BitCastInst>(&I))
3564 if (
auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
3565 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3568 return GetStoreIfValid(&I);
3573 llvm::StoreInst *store =
3575 if (!store)
return nullptr;
3579 llvm::BasicBlock *StoreBB = store->getParent();
3580 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3582 while (IP != StoreBB) {
3583 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3599 int BitWidth,
int CharWidth) {
3600 assert(CharWidth <= 64);
3601 assert(
static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3604 if (BitOffset >= CharWidth) {
3605 Pos += BitOffset / CharWidth;
3606 BitOffset = BitOffset % CharWidth;
3609 const uint64_t
Used = (uint64_t(1) << CharWidth) - 1;
3610 if (BitOffset + BitWidth >= CharWidth) {
3611 Bits[Pos++] |= (
Used << BitOffset) &
Used;
3612 BitWidth -= CharWidth - BitOffset;
3616 while (BitWidth >= CharWidth) {
3618 BitWidth -= CharWidth;
3622 Bits[Pos++] |= (
Used >> (CharWidth - BitWidth)) << BitOffset;
3630 int StorageSize,
int BitOffset,
int BitWidth,
3631 int CharWidth,
bool BigEndian) {
3634 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3637 std::reverse(TmpBits.begin(), TmpBits.end());
3639 for (uint64_t
V : TmpBits)
3640 Bits[StorageOffset++] |=
V;
3671 BFI.
Size, CharWidth,
3693 auto Src = TmpBits.begin();
3694 auto Dst = Bits.begin() + Offset + I * Size;
3695 for (
int J = 0; J < Size; ++J)
3715 std::fill_n(Bits.begin() + Offset, Size,
3720 int Pos,
int Size,
int CharWidth,
3725 for (
auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size;
P != E;
3727 Mask = (Mask << CharWidth) | *
P;
3729 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3731 Mask = (Mask << CharWidth) | *--
P;
3740 llvm::IntegerType *ITy,
3742 assert(Src->getType() == ITy);
3743 assert(ITy->getScalarSizeInBits() <= 64);
3746 int Size = DataLayout.getTypeStoreSize(ITy);
3754 return Builder.CreateAnd(Src, Mask,
"cmse.clear");
3760 llvm::ArrayType *ATy,
3763 int Size = DataLayout.getTypeStoreSize(ATy);
3770 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3772 llvm::Value *R = llvm::PoisonValue::get(ATy);
3773 for (
int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3775 DataLayout.isBigEndian());
3776 MaskIndex += CharsPerElt;
3777 llvm::Value *T0 =
Builder.CreateExtractValue(Src, I);
3778 llvm::Value *T1 =
Builder.CreateAnd(T0, Mask,
"cmse.clear");
3779 R =
Builder.CreateInsertValue(R, T1, I);
3806 llvm::DebugLoc RetDbgLoc;
3807 llvm::Value *RV =
nullptr;
3817 llvm::Function::arg_iterator EI =
CurFn->arg_end();
3819 llvm::Value *ArgStruct = &*EI;
3823 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3829 auto AI =
CurFn->arg_begin();
3868 if (llvm::StoreInst *SI =
3874 RetDbgLoc = SI->getDebugLoc();
3876 RV = SI->getValueOperand();
3877 SI->eraseFromParent();
3900 if (
auto *FD = dyn_cast<FunctionDecl>(
CurCodeDecl))
3901 RT = FD->getReturnType();
3902 else if (
auto *MD = dyn_cast<ObjCMethodDecl>(
CurCodeDecl))
3903 RT = MD->getReturnType();
3907 llvm_unreachable(
"Unexpected function/method type");
3927 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3934 results.push_back(elt);
3938 if (results.size() == 1) {
3946 RV = llvm::PoisonValue::get(returnType);
3947 for (
unsigned i = 0, e = results.size(); i != e; ++i) {
3948 RV =
Builder.CreateInsertValue(RV, results[i], i);
3955 llvm_unreachable(
"Invalid ABI kind for return argument");
3958 llvm::Instruction *
Ret;
3964 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3975 Ret->setDebugLoc(std::move(RetDbgLoc));
3988 ReturnsNonNullAttr *RetNNAttr =
nullptr;
3989 if (
SanOpts.
has(SanitizerKind::ReturnsNonnullAttribute))
3992 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
4000 assert(!requiresReturnValueNullabilityCheck() &&
4001 "Cannot check nullability and the nonnull attribute");
4002 AttrLoc = RetNNAttr->getLocation();
4003 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
4004 Handler = SanitizerHandler::NonnullReturn;
4006 if (
auto *DD = dyn_cast<DeclaratorDecl>(
CurCodeDecl))
4007 if (
auto *TSI = DD->getTypeSourceInfo())
4009 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
4010 CheckKind = SanitizerKind::NullabilityReturn;
4011 Handler = SanitizerHandler::NullabilityReturn;
4014 SanitizerScope SanScope(
this);
4021 llvm::Value *CanNullCheck =
Builder.CreateIsNotNull(SLocPtr);
4022 if (requiresReturnValueNullabilityCheck())
4024 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
4025 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
4029 llvm::Value *Cond =
Builder.CreateIsNotNull(RV);
4031 llvm::Value *DynamicData[] = {SLocPtr};
4032 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
4052 llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.
getLLVMContext());
4053 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
4080 if (
type->isReferenceType()) {
4089 param->
hasAttr<NSConsumedAttr>() &&
4090 type->isObjCRetainableType()) {
4093 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
4108 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
4110 "cleanup for callee-destructed param not recorded");
4112 llvm::Instruction *isActive =
Builder.CreateUnreachable();
4118 return isa<llvm::ConstantPointerNull>(addr);
4127 "shouldn't have writeback for provably null argument");
4129 llvm::BasicBlock *contBB =
nullptr;
4133 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.
getPointer(),
4135 if (!provablyNonNull) {
4139 llvm::Value *isNull =
4141 CGF.
Builder.CreateCondBr(isNull, contBB, writebackBB);
4150 "icr.writeback-cast");
4159 if (writeback.
ToUse) {
4184 if (!provablyNonNull)
4199 for (
const auto &I : llvm::reverse(Cleanups)) {
4201 I.IsActiveIP->eraseFromParent();
4207 if (uop->getOpcode() == UO_AddrOf)
4208 return uop->getSubExpr();
4238 llvm::PointerType *destType =
4240 llvm::Type *destElemType =
4257 CodeGenFunction::ConditionalEvaluation condEval(CGF);
4263 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
4267 llvm::BasicBlock *contBB =
nullptr;
4268 llvm::BasicBlock *originBB =
nullptr;
4271 llvm::Value *finalArgument;
4273 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.
getPointer(),
4275 if (provablyNonNull) {
4278 llvm::Value *isNull =
4281 finalArgument = CGF.
Builder.CreateSelect(isNull,
4282 llvm::ConstantPointerNull::get(destType),
4288 originBB = CGF.
Builder.GetInsertBlock();
4291 CGF.
Builder.CreateCondBr(isNull, contBB, copyBB);
4293 condEval.begin(CGF);
4297 llvm::Value *valueToUse =
nullptr;
4305 src = CGF.
Builder.CreateBitCast(src, destElemType,
"icr.cast");
4322 if (shouldCopy && !provablyNonNull) {
4323 llvm::BasicBlock *copyBB = CGF.
Builder.GetInsertBlock();
4328 llvm::PHINode *phiToUse = CGF.
Builder.CreatePHI(valueToUse->getType(), 2,
4330 phiToUse->addIncoming(valueToUse, copyBB);
4331 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
4333 valueToUse = phiToUse;
4347 StackBase = CGF.
Builder.CreateStackSave(
"inalloca.save");
4353 CGF.
Builder.CreateStackRestore(StackBase);
4361 if (!AC.getDecl() || !(
SanOpts.
has(SanitizerKind::NonnullAttribute) ||
4366 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) :
nullptr;
4367 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4370 const NonNullAttr *NNAttr =
nullptr;
4371 if (
SanOpts.
has(SanitizerKind::NonnullAttribute))
4374 bool CanCheckNullability =
false;
4375 if (
SanOpts.
has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
4376 auto Nullability = PVD->getType()->getNullability();
4377 CanCheckNullability = Nullability &&
4379 PVD->getTypeSourceInfo();
4382 if (!NNAttr && !CanCheckNullability)
4389 AttrLoc = NNAttr->getLocation();
4390 CheckKind = SanitizerKind::NonnullAttribute;
4391 Handler = SanitizerHandler::NonnullArg;
4393 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4394 CheckKind = SanitizerKind::NullabilityArg;
4395 Handler = SanitizerHandler::NullabilityArg;
4398 SanitizerScope SanScope(
this);
4400 llvm::Constant *StaticData[] = {
4402 llvm::ConstantInt::get(
Int32Ty, ArgNo + 1),
4404 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
4420 return llvm::any_of(ArgTypes, [&](
QualType Ty) {
4431 return classDecl->getTypeParamListAsWritten();
4435 return catDecl->getTypeParamList();
4445 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4446 AbstractCallee AC,
unsigned ParamsToSkip, EvaluationOrder Order) {
4449 assert((ParamsToSkip == 0 ||
Prototype.P) &&
4450 "Can't skip parameters if type info is not provided");
4460 bool IsVariadic =
false;
4467 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4468 MD->param_type_end());
4472 ExplicitCC = FPT->getExtInfo().getCC();
4473 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4474 FPT->param_type_end());
4482 assert(Arg != ArgRange.end() &&
"Running over edge of argument list!");
4484 (isGenericMethod || Ty->isVariablyModifiedType() ||
4485 Ty.getNonReferenceType()->isObjCRetainableType() ||
4487 .getCanonicalType(Ty.getNonReferenceType())
4489 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4490 "type mismatch in call argument!");
4496 assert((Arg == ArgRange.end() || IsVariadic) &&
4497 "Extra arguments in non-variadic function!");
4502 for (
auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4503 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4504 assert((
int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4516 auto MaybeEmitImplicitObjectSize = [&](
unsigned I,
const Expr *Arg,
4518 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4520 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4527 assert(EmittedArg.getScalarVal() &&
"We emitted nothing for the arg?");
4528 llvm::Value *
V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4529 EmittedArg.getScalarVal(),
4535 std::swap(Args.back(), *(&Args.back() - 1));
4540 assert(
getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4541 "inalloca only supported on x86");
4546 size_t CallArgsStart = Args.size();
4547 for (
unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4548 unsigned Idx = LeftToRight ? I : E - I - 1;
4550 unsigned InitialArgSize = Args.size();
4553 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4554 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4556 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4558 "Argument and parameter types don't match");
4562 assert(InitialArgSize + 1 == Args.size() &&
4563 "The code below depends on only adding one arg per EmitCallArg");
4564 (void)InitialArgSize;
4567 if (!Args.back().hasLValue()) {
4568 RValue RVArg = Args.back().getKnownRValue();
4570 ParamsToSkip + Idx);
4574 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4581 std::reverse(Args.begin() + CallArgsStart, Args.end());
4589 : Addr(Addr), Ty(Ty) {}
4607struct DisableDebugLocationUpdates {
4609 bool disabledDebugInfo;
4611 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.
getDebugInfo()))
4614 ~DisableDebugLocationUpdates() {
4615 if (disabledDebugInfo)
4651 DisableDebugLocationUpdates Dis(*
this, E);
4653 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4659 "reference binding to unmaterialized r-value!");
4671 if (
type->isRecordType() &&
4678 bool DestroyedInCallee =
true, NeedsEHCleanup =
true;
4679 if (
const auto *RD =
type->getAsCXXRecordDecl())
4680 DestroyedInCallee = RD->hasNonTrivialDestructor();
4684 if (DestroyedInCallee)
4691 if (DestroyedInCallee && NeedsEHCleanup) {
4698 llvm::Instruction *IsActive =
Builder.CreateUnreachable();
4704 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4705 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
4715QualType CodeGenFunction::getVarArgType(
const Expr *Arg) {
4719 if (!
getTarget().getTriple().isOSWindows())
4736CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4739 Inst->setMetadata(
"clang.arc.no_objc_arc_exceptions",
4746 const llvm::Twine &name) {
4754 const llvm::Twine &name) {
4756 call->setDoesNotThrow();
4763 const llvm::Twine &name) {
4778 if (
auto *CalleeFn = dyn_cast<llvm::Function>(
Callee->stripPointerCasts())) {
4779 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
4780 auto IID = CalleeFn->getIntrinsicID();
4781 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
4794 const llvm::Twine &name) {
4795 llvm::CallInst *call =
Builder.CreateCall(
4808 llvm::InvokeInst *invoke =
4814 invoke->setDoesNotReturn();
4817 llvm::CallInst *call =
Builder.CreateCall(callee, args, BundleList);
4818 call->setDoesNotReturn();
4827 const Twine &name) {
4835 const Twine &name) {
4845 const Twine &Name) {
4850 llvm::CallBase *Inst;
4852 Inst =
Builder.CreateCall(Callee, Args, BundleList, Name);
4855 Inst =
Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4863 AddObjCARCExceptionMetadata(Inst);
4868void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4870 DeferredReplacements.push_back(
4871 std::make_pair(llvm::WeakTrackingVH(Old), New));
4878[[nodiscard]] llvm::AttributeList
4879maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4880 const llvm::AttributeList &Attrs,
4881 llvm::Align NewAlign) {
4882 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4883 if (CurAlign >= NewAlign)
4885 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4886 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
4887 .addRetAttribute(Ctx, AlignAttr);
4890template <
typename AlignedAttrTy>
class AbstractAssumeAlignedAttrEmitter {
4895 const AlignedAttrTy *AA =
nullptr;
4897 llvm::Value *Alignment =
nullptr;
4898 llvm::ConstantInt *OffsetCI =
nullptr;
4904 AA = FuncDecl->
getAttr<AlignedAttrTy>();
4909 [[nodiscard]] llvm::AttributeList
4910 TryEmitAsCallSiteAttribute(
const llvm::AttributeList &Attrs) {
4911 if (!AA || OffsetCI || CGF.
SanOpts.
has(SanitizerKind::Alignment))
4913 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4918 if (!AlignmentCI->getValue().isPowerOf2())
4920 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4923 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4935 AA->getLocation(), Alignment, OffsetCI);
4941class AssumeAlignedAttrEmitter final
4942 :
public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4945 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4949 Alignment = cast<llvm::ConstantInt>(CGF.
EmitScalarExpr(AA->getAlignment()));
4950 if (
Expr *Offset = AA->getOffset()) {
4952 if (OffsetCI->isNullValue())
4959class AllocAlignAttrEmitter final
4960 :
public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4964 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4968 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4977 if (
auto *VT = dyn_cast<llvm::VectorType>(Ty))
4978 return VT->getPrimitiveSizeInBits().getKnownMinValue();
4979 if (
auto *AT = dyn_cast<llvm::ArrayType>(Ty))
4982 unsigned MaxVectorWidth = 0;
4983 if (
auto *ST = dyn_cast<llvm::StructType>(Ty))
4984 for (
auto *I : ST->elements())
4986 return MaxVectorWidth;
4993 llvm::CallBase **callOrInvoke,
bool IsMustTail,
5006 const Decl *TargetDecl =
Callee.getAbstractInfo().getCalleeDecl().getDecl();
5007 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5014 if (TargetDecl->
hasAttr<AlwaysInlineAttr>() &&
5015 (TargetDecl->
hasAttr<TargetAttr>() ||
5022 CGM, Loc, dyn_cast_or_null<FunctionDecl>(
CurCodeDecl), FD, CallArgs);
5030 if (llvm::StructType *ArgStruct = CallInfo.
getArgStruct()) {
5033 llvm::AllocaInst *AI;
5035 IP = IP->getNextNode();
5036 AI =
new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
5042 AI->setAlignment(Align.getAsAlign());
5043 AI->setUsedWithInAlloca(
true);
5044 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5045 ArgMemory =
Address(AI, ArgStruct, Align);
5048 ClangToLLVMArgMapping IRFunctionArgs(
CGM.
getContext(), CallInfo);
5055 llvm::Value *UnusedReturnSizePtr =
nullptr;
5062 llvm::TypeSize size =
5067 if (IRFunctionArgs.hasSRetArg()) {
5068 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.
getPointer();
5085 assert(CallInfo.
arg_size() == CallArgs.size() &&
5086 "Mismatch between function signature & arguments.");
5089 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5090 I != E; ++I, ++info_it, ++ArgNo) {
5094 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5095 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5098 unsigned FirstIRArg, NumIRArgs;
5099 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5101 bool ArgHasMaybeUndefAttr =
5106 assert(NumIRArgs == 0);
5107 assert(
getTarget().getTriple().getArch() == llvm::Triple::x86);
5108 if (I->isAggregate()) {
5110 ? I->getKnownLValue().getAddress(*
this)
5111 : I->getKnownRValue().getAggregateAddress();
5112 llvm::Instruction *Placeholder =
5117 CGBuilderTy::InsertPoint IP =
Builder.saveIP();
5118 Builder.SetInsertPoint(Placeholder);
5131 deferPlaceholderReplacement(Placeholder, Addr.
getPointer());
5136 I->Ty,
getContext().getTypeAlignInChars(I->Ty),
5137 "indirect-arg-temp");
5138 I->copyInto(*
this, Addr);
5147 I->copyInto(*
this, Addr);
5154 assert(NumIRArgs == 1);
5155 if (!I->isAggregate()) {
5161 if (ArgHasMaybeUndefAttr)
5163 IRCallArgs[FirstIRArg] = Val;
5165 I->copyInto(*
this, Addr);
5176 ? I->getKnownLValue().getAddress(*
this)
5177 : I->getKnownRValue().getAggregateAddress();
5182 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5183 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5184 TD->getAllocaAddrSpace()) &&
5185 "indirect argument must be in alloca address space");
5187 bool NeedCopy =
false;
5189 llvm::getOrEnforceKnownAlignment(
V, Align.
getAsAlign(), *TD) <
5192 }
else if (I->hasLValue()) {
5193 auto LV = I->getKnownLValue();
5199 if (!isByValOrRef ||
5204 if ((isByValOrRef &&
5212 else if ((isByValOrRef &&
5213 Addr.
getType()->getAddressSpace() != IRFuncTy->
5224 if (ArgHasMaybeUndefAttr)
5226 IRCallArgs[FirstIRArg] = Val;
5229 llvm::TypeSize ByvalTempElementSize =
5231 llvm::Value *LifetimeSize =
5236 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
5239 I->copyInto(*
this, AI);
5242 auto *T = llvm::PointerType::get(
5248 if (ArgHasMaybeUndefAttr)
5249 Val =
Builder.CreateFreeze(Val);
5250 IRCallArgs[FirstIRArg] = Val;
5257 assert(NumIRArgs == 0);
5265 assert(NumIRArgs == 1);
5267 if (!I->isAggregate())
5268 V = I->getKnownRValue().getScalarVal();
5271 I->hasLValue() ? I->getKnownLValue().getAddress(*
this)
5272 : I->getKnownRValue().getAggregateAddress());
5278 assert(!swiftErrorTemp.
isValid() &&
"multiple swifterror args");
5282 getContext().getTypeAlignInChars(pointeeTy));
5287 cast<llvm::AllocaInst>(
V)->setSwiftError(
true);
5295 V->getType()->isIntegerTy())
5300 if (FirstIRArg < IRFuncTy->getNumParams() &&
5301 V->getType() != IRFuncTy->getParamType(FirstIRArg))
5302 V =
Builder.CreateBitCast(
V, IRFuncTy->getParamType(FirstIRArg));
5304 if (ArgHasMaybeUndefAttr)
5306 IRCallArgs[FirstIRArg] =
V;
5310 llvm::StructType *STy =
5314 [[maybe_unused]] llvm::TypeSize SrcTypeSize =
5316 [[maybe_unused]] llvm::TypeSize DstTypeSize =
5318 if (STy->containsHomogeneousScalableVectorTypes()) {
5319 assert(SrcTypeSize == DstTypeSize &&
5320 "Only allow non-fractional movement of structure with "
5321 "homogeneous scalable vector type");
5323 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5330 if (!I->isAggregate()) {
5332 I->copyInto(*
this, Src);
5334 Src = I->hasLValue() ? I->getKnownLValue().getAddress(*
this)
5335 : I->getKnownRValue().getAggregateAddress();
5345 llvm::TypeSize SrcTypeSize =
5348 if (SrcTypeSize.isScalable()) {
5349 assert(STy->containsHomogeneousScalableVectorTypes() &&
5350 "ABI only supports structure with homogeneous scalable vector "
5352 assert(SrcTypeSize == DstTypeSize &&
5353 "Only allow non-fractional movement of structure with "
5354 "homogeneous scalable vector type");
5355 assert(NumIRArgs == STy->getNumElements());
5357 llvm::Value *StoredStructValue =
5359 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5360 llvm::Value *Extract =
Builder.CreateExtractValue(
5361 StoredStructValue, i, Src.
getName() +
".extract" + Twine(i));
5362 IRCallArgs[FirstIRArg + i] = Extract;
5365 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5366 uint64_t DstSize = DstTypeSize.getFixedValue();
5372 if (SrcSize < DstSize) {
5381 assert(NumIRArgs == STy->getNumElements());
5382 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5385 if (ArgHasMaybeUndefAttr)
5386 LI =
Builder.CreateFreeze(LI);
5387 IRCallArgs[FirstIRArg + i] = LI;
5392 assert(NumIRArgs == 1);
5400 auto *ATy = dyn_cast<llvm::ArrayType>(
Load->getType());
5401 if (ATy !=
nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
5405 if (ArgHasMaybeUndefAttr)
5407 IRCallArgs[FirstIRArg] =
Load;
5417 llvm::Value *tempSize =
nullptr;
5420 if (I->isAggregate()) {
5421 addr = I->hasLValue() ? I->getKnownLValue().getAddress(*
this)
5422 : I->getKnownRValue().getAggregateAddress();
5425 RValue RV = I->getKnownRValue();
5437 nullptr, &AllocaAddr);
5445 unsigned IRArgPos = FirstIRArg;
5446 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5447 llvm::Type *eltType = coercionType->getElementType(i);
5451 if (ArgHasMaybeUndefAttr)
5452 elt =
Builder.CreateFreeze(elt);
5453 IRCallArgs[IRArgPos++] = elt;
5455 assert(IRArgPos == FirstIRArg + NumIRArgs);
5465 unsigned IRArgPos = FirstIRArg;
5466 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5467 assert(IRArgPos == FirstIRArg + NumIRArgs);
5473 const CGCallee &ConcreteCallee =
Callee.prepareConcreteCallee(*
this);
5479 assert(IRFunctionArgs.hasInallocaArg());
5480 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5491 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5492 llvm::Value *Ptr) -> llvm::Function * {
5493 if (!CalleeFT->isVarArg())
5497 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5498 if (CE->getOpcode() == llvm::Instruction::BitCast)
5499 Ptr = CE->getOperand(0);
5502 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5506 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5510 if (OrigFT->isVarArg() ||
5511 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5512 OrigFT->getReturnType() != CalleeFT->getReturnType())
5515 for (
unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5516 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5522 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5524 IRFuncTy = OrigFn->getFunctionType();
5539 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5540 for (
unsigned i = 0; i < IRCallArgs.size(); ++i) {
5542 if (IRFunctionArgs.hasInallocaArg() &&
5543 i == IRFunctionArgs.getInallocaArgNo())
5545 if (i < IRFuncTy->getNumParams())
5546 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5551 for (
unsigned i = 0; i < IRCallArgs.size(); ++i)
5552 LargestVectorWidth = std::max(LargestVectorWidth,
5557 llvm::AttributeList Attrs;
5564 if (FD->hasAttr<StrictFPAttr>())
5566 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5571 if (FD->hasAttr<OptimizeNoneAttr>() &&
getLangOpts().FastMath)
5577 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoMerge);
5581 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5586 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5595 !(TargetDecl && TargetDecl->
hasAttr<NoInlineAttr>())) {
5597 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5602 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5609 CannotThrow =
false;
5618 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5620 if (
auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5621 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5629 if (UnusedReturnSizePtr)
5631 UnusedReturnSizePtr);
5633 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr :
getInvokeDest();
5639 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5643 if (FD->hasAttr<StrictFPAttr>())
5645 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5647 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*
this, TargetDecl);
5648 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5650 AllocAlignAttrEmitter AllocAlignAttrEmitter(*
this, TargetDecl, CallArgs);
5651 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5656 CI =
Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5659 CI =
Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5663 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
5664 CI->getCalledFunction()->getName().starts_with(
"_Z4sqrt")) {
5673 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(
CurFuncDecl)) {
5674 if (
const auto *A = FD->getAttr<CFGuardAttr>()) {
5675 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5681 CI->setAttributes(Attrs);
5682 CI->setCallingConv(
static_cast<llvm::CallingConv::ID
>(
CallingConv));
5686 if (!CI->getType()->isVoidTy())
5687 CI->setName(
"call");
5690 LargestVectorWidth =
5696 if (!CI->getCalledFunction())
5703 AddObjCARCExceptionMetadata(CI);
5706 if (llvm::CallInst *
Call = dyn_cast<llvm::CallInst>(CI)) {
5707 if (TargetDecl && TargetDecl->
hasAttr<NotTailCalledAttr>())
5708 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5709 else if (IsMustTail)
5710 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5715 TargetDecl->
hasAttr<MSAllocatorAttr>())
5719 if (TargetDecl && TargetDecl->
hasAttr<ErrorAttr>()) {
5720 llvm::ConstantInt *
Line =
5722 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(
Line);
5724 CI->setMetadata(
"srcloc", MDT);
5732 if (CI->doesNotReturn()) {
5733 if (UnusedReturnSizePtr)
5737 if (
SanOpts.
has(SanitizerKind::Unreachable)) {
5740 if (
auto *F = CI->getCalledFunction())
5741 F->removeFnAttr(llvm::Attribute::NoReturn);
5742 CI->removeFnAttr(llvm::Attribute::NoReturn);
5747 SanitizerKind::KernelAddress)) {
5748 SanitizerScope SanScope(
this);
5749 llvm::IRBuilder<>::InsertPointGuard IPGuard(
Builder);
5751 auto *FnType = llvm::FunctionType::get(
CGM.
VoidTy,
false);
5752 llvm::FunctionCallee Fn =
5759 Builder.ClearInsertionPoint();
5779 if (CI->getType()->isVoidTy())
5783 Builder.ClearInsertionPoint();
5789 if (swiftErrorTemp.
isValid()) {
5812 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5814 unsigned unpaddedIndex = 0;
5815 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5816 llvm::Type *eltType = coercionType->getElementType(i);
5819 llvm::Value *elt = CI;
5820 if (requiresExtract)
5821 elt =
Builder.CreateExtractValue(elt, unpaddedIndex++);
5823 assert(unpaddedIndex == 0);
5832 if (UnusedReturnSizePtr)
5848 llvm::Value *Real =
Builder.CreateExtractValue(CI, 0);
5849 llvm::Value *Imag =
Builder.CreateExtractValue(CI, 1);
5858 DestIsVolatile =
false;
5866 llvm::Value *
V = CI;
5867 if (
V->getType() != RetIRTy)
5872 llvm_unreachable(
"bad evaluation kind");
5878 if (
auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
5879 llvm::Value *
V = CI;
5880 if (
auto *ScalableSrcTy =
5881 dyn_cast<llvm::ScalableVectorType>(
V->getType())) {
5882 if (FixedDstTy->getElementType() == ScalableSrcTy->getElementType()) {
5884 V =
Builder.CreateExtractVector(FixedDstTy,
V, Zero,
"cast.fixed");
5895 DestIsVolatile =
false;
5912 llvm_unreachable(
"Invalid ABI kind for return argument");
5915 llvm_unreachable(
"Unhandled ABIArgInfo::Kind");
5919 if (
Ret.isScalar() && TargetDecl) {
5920 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5921 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5926 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5927 LifetimeEnd.Emit(*
this, {});
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument ArgNo to be non-null.
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA=nullptr)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from \TargetOpts and \F, and sets the result in \FuncAttr.
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameters considered required.
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from Src and coerce the result to type Ty.
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsWindows)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, llvm::DenormalMode FP32DenormalMode, llvm::AttrBuilder &FuncAttrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store of Src into Dst, coercing between the source and destination types as needed.
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of bytes out of, try to gep into the struct to get at its inner-most element.
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and should not simply overwrite any existing attributes.
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
llvm::MachO::Target Target
static QualType getParamType(Sema &SemaRef, ArrayRef< ResultCandidate > Candidates, unsigned N)
Get the type of the Nth parameter from a given set of overload candidates.
static bool isInstanceMethod(const Decl *D)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic analysis of a program.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod, bool IsBuiltin=false) const
Retrieves the default calling convention for the current target.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type and address space.
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const
Return number of constant array elements.
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/class.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
const FunctionProtoType * getFunctionType() const
getFunctionType - Return the underlying function type for this block.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an explicit object parameter.
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const LLVM_READONLY
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
bool isCanonicalAsParam() const
Determines if this canonical type is furthermore canonical as a parameter.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power-of-two 8-bit bytes.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of any element in the array.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap instructions.
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to or returned from a function.
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calling convention.
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenced.
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicates default alignment) and address space.
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another specified type stored in 'CoerceToType'.
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of type Ty from the Microsoft ABI va_list pointed to by VAListAddr.
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of type Ty from the va_list pointed to by VAListAddr.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::Value * getPointer() const
llvm::StringRef getName() const
Return the IR name of the pointer value.
llvm::PointerType * getType() const
Return the type of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
const BlockExpr * BlockExpression
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'this'.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
GlobalDecl getVirtualMethodDecl() const
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void PopCleanupBlock(bool FallThroughIsBranchThrough=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
SanitizerSet SanOpts
Sanitizers enabled for this function.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
const CodeGen::CGBlockInfo * BlockInfo
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void callCStructDestructor(LValue Dst)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type,...
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
const TargetInfo & getTarget() const
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::BasicBlock * getInvokeDest()
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
CGDebugInfo * getDebugInfo()
Address EmitVAListRef(const Expr *E)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile)
Build all the stores needed to initialize an aggregate at Dest with the value Val.
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Address CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment, without...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Type * ConvertType(QualType T)
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
CodeGenTypes & getTypes() const
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const CallExpr * MustTailCall
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
This class organizes the cross-function state that is used while generating LLVM code.
llvm::MDNode * getNoObjCARCExceptionsMetadata()
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
ObjCEntrypoints & getObjCEntrypoints() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
void valueProfile(CGBuilderTy &Builder, uint32_t ValueKind, llvm::Instruction *ValueSite, llvm::Value *ValuePtr)
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const ABIInfo & getABIInfo() const
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
unsigned getTargetAddressSpace(QualType T) const
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
A cleanup scope which generates the cleanup blocks lazily.
EHScopeStack::Cleanup * getCleanup()
Information for lazily generating a cleanup.
virtual bool isRedundantBeforeReturn()
A saved depth on the scope stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator end() const
Returns an iterator pointing to the outermost EH scope.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
bool isVolatileQualified() const
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
LangAS getAddressSpace() const
CharUnits getAlignment() const
Address getAddress(CodeGenFunction &CGF) const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Qualifiers::ObjCLifetime getObjCLifetime() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
bool isVolatileQualified() const
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isZeroLengthBitField(const ASTContext &Ctx) const
Is this a zero-length bit-field? Such bit-fields aren't really bit-fields at all and instead act as a...
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isVariadic() const
Whether this function prototype is variadic.
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
const Decl * getDecl() const
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
bool isParamDestroyedInCallee() const
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getDecl() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change,...
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool useObjCFPRetForRealType(FloatModeKind T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
The base class of the type hierarchy.
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
bool isBlockPointerType() const
bool isIncompleteArrayType() const
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
QualType getCanonicalTypeInternal() const
bool isMemberPointerType() const
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
bool isAnyPointerType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool isNullPtrType() const
bool isObjCRetainableType() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
const Expr * getSubExpr() const
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Ret(InterpState &S, CodePtr &PC, APValue &Result)
bool This(InterpState &S, CodePtr OpPC)
bool Zero(InterpState &S, CodePtr OpPC)
bool Load(InterpState &S, CodePtr OpPC)
The JSON file list parser is used to communicate input to InstallAPI.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
@ Result
The result type of a method or function.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_Complete
Complete object dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::BasicBlock * getBlock() const
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
CharUnits getPointerAlign() const
LangAS getASTAllocaAddressSpace() const
bool isMSVCXXPersonality() const
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Iterator for iterating over Stmt * arrays that contain only T *.