//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "ABIInfoImpl.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall;
  case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
  case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
  case CC_PreserveNone: return llvm::CallingConv::PreserveNone;
  }
}
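// Editor's illustrative example (not part of the original file): on 32-bit
// x86, a declaration such as `__attribute__((fastcall)) void f(int x);` maps
// through CC_X86FastCall to llvm::CallingConv::X86_FastCall above, which
// shows up in the emitted IR as:
//
//   define x86_fastcallcc void @f(i32 inreg %x)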

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
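// Editor's illustrative example (not part of the original file): given
//
//   struct S { void m() const; };
//
// DeriveThisType(S, m) yields the canonical `S *`, not `const S *`; codegen
// deliberately drops the method's CVR qualifiers and keeps only its
// address-space qualifier, if any.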

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 FnInfoOpts::None, std::nullopt,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                              const FunctionProtoType *proto,
                              unsigned prefixArgs,
                              unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}
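// Editor's illustrative example (not part of the original file): a prototype
// like
//
//   void f(void *p __attribute__((pass_object_size(0))));
//
// lowers to two ABI-level arguments (the pointer plus a hidden size_t), so
// the helper above emplaces an extra default-constructed ExtParameterInfo
// right after the annotated parameter to keep infos and argument slots
// index-aligned.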

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  FnInfoOpts opts =
      instanceMethod ? FnInfoOpts::IsInstanceMethod : FnInfoOpts::None;
  return CGT.arrangeLLVMFunctionInfo(resultType, opts, prefix,
                                     FTP->getExtInfo(), paramInfos, Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<AArch64SVEPcsAttr>())
    return CC_AArch64SVEPCS;

  if (D->hasAttr<AMDGPUKernelCallAttr>())
    return CC_AMDGPUKernelCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  if (D->hasAttr<M68kRTDAttr>())
    return CC_M68kRTD;

  if (D->hasAttr<PreserveNoneAttr>())
    return CC_PreserveNone;

  return CC_C;
}
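// Editor's illustrative example (not part of the original file): an
// Objective-C method annotated `__attribute__((preserve_most))` is mapped to
// CC_PreserveMost here, then to llvm::CallingConv::PreserveMost by
// ClangCallConvToLLVMCallConv, and finally prints as `preserve_mostcc` on the
// IR-level call and definition.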

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isImplicitObjectMemberFunction()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD);
  argTypes.push_back(DeriveThisType(ThisType, MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::IsInstanceMethod,
                                 argTypes, extInfo, paramInfos, required);
}
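// Editor's illustrative note (not part of the original file): under ABIs with
// 'this'-returning constructors, such as the ARM C++ ABI, HasThisReturn(GD)
// is true above and resultType becomes the 'this' pointer type rather than
// void, so the emitted constructor returns its first argument.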

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }

  return arrangeLLVMFunctionInfo(ResultType, FnInfoOpts::IsInstanceMethod,
                                 ArgTypes, Info, ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isImplicitObjectMemberFunction())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(noProto->getReturnType(), FnInfoOpts::None,
                                   std::nullopt, noProto->getExtInfo(), {},
                                   RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(
      MD->isDirectMethod() ? 1 : 2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  if (!MD->isDirectMethod())
    argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()),
                                 FnInfoOpts::None, argTys, einfo, extParamInfos,
                                 required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(GetReturnType(returnType), FnInfoOpts::None,
                                 argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::None, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::IsInstanceMethod,
                                 ArgTys, FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
    // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None;
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     opts, argTypes, fnType->getExtInfo(),
                                     paramInfos, required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 FnInfoOpts::None, argTypes,
                                 proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), FnInfoOpts::None,
                                 argTypes, FunctionType::ExtInfo(),
                                 /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(GetReturnType(resultType), FnInfoOpts::None,
                                 argTypes, FunctionType::ExtInfo(), {},
                                 RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::None, argTypes,
                                 FunctionType::ExtInfo(), {},
                                 RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 FnInfoOpts::IsInstanceMethod, argTypes, info,
                                 paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, FnInfoOpts::None,
                                 std::nullopt, FunctionType::ExtInfo(), {},
                                 RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  FnInfoOpts opts = FnInfoOpts::None;
  if (signature.isInstanceMethod())
    opts |= FnInfoOpts::IsInstanceMethod;
  if (signature.isChainCall())
    opts |= FnInfoOpts::IsChainCall;
  if (signature.isDelegateCall())
    opts |= FnInfoOpts::IsDelegateCall;
  return arrangeLLVMFunctionInfo(signature.getReturnType(), opts, argTypes,
                                 signature.getExtInfo(), paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
    CanQualType resultType, FnInfoOpts opts, ArrayRef<CanQualType> argTypes,
    FunctionType::ExtInfo info,
    ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
    RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  bool isInstanceMethod =
      (opts & FnInfoOpts::IsInstanceMethod) == FnInfoOpts::IsInstanceMethod;
  bool isChainCall =
      (opts & FnInfoOpts::IsChainCall) == FnInfoOpts::IsChainCall;
  bool isDelegateCall =
      (opts & FnInfoOpts::IsDelegateCall) == FnInfoOpts::IsDelegateCall;
  CGFunctionInfo::Profile(ID, isInstanceMethod, isChainCall, isDelegateCall,
                          info, paramInfos, required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, isInstanceMethod, isChainCall, isDelegateCall,
                              info, paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}
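// Editor's illustrative note (not part of the original file): because the
// result is interned in the FunctionInfos folding set keyed on the profiled
// signature, arranging e.g. `int(int, int)` twice with identical ExtInfo
// returns the same CGFunctionInfo pointer, and the ABI computation above runs
// only once per distinct signature.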

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, bool instanceMethod,
                                       bool chainCall, bool delegateCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->DelegateCall = delegateCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  FI->MaxVectorWidth = 0;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}
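// Editor's illustrative example (not part of the original file): for
//
//   struct P { int a; float b[2]; };
//
// getExpansionSize(P) returns 3 (one slot for `a` plus two recursively
// expanded array elements), while `_Complex double` contributes 2 and any
// non-expandable type contributes 1.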

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
  llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
        BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltTy, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    llvm::Value *Arg = &*AI++;
    if (LV.isBitField()) {
      EmitStoreThroughLValue(RValue::get(Arg), LV);
    } else {
      // TODO: currently some places are inconsistent in what LLVM pointer
      // type they use (see D118744). Once clang uses opaque pointers all
      // LLVM pointer types will be the same and we can remove this check.
      if (Arg->getType()->isPointerTy()) {
        Address Addr = LV.getAddress(*this);
        Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
      }
      EmitStoreOfScalar(Arg, LV);
    }
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
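// Editor's illustrative example (not part of the original file): for
// %T = type { { i32 } } and DstSize == 4, the helper above GEPs through both
// struct levels and returns the address of the innermost i32, because at each
// step the first element's store size still covers the requested 4 bytes.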

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
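// Editor's illustrative example (not part of the original file): coercing an
// i64 holding 0x11223344AABBCCDD down to i32 yields 0xAABBCCDD on a
// little-endian target but 0x11223344 on a big-endian one, exactly as if the
// i64 had been stored to memory and an i32 loaded back from the same address.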

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
                                             DstSize.getFixedValue(), CGF);
    SrcTy = Src.getElementType();
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = Src.withElementType(Ty);
    return CGF.Builder.CreateLoad(Src);
  }

  // If coercing a fixed vector to a scalable vector for ABI compatibility, and
  // the types match, use the llvm.vector.insert intrinsic to perform the
  // conversion.
  if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      // If we are casting a fixed i8 vector to a scalable i1 predicate
      // vector, use a vector insert and bitcast the result.
      if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
          ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
          FixedSrcTy->getElementType()->isIntegerTy(8)) {
        ScalableDstTy = llvm::ScalableVectorType::get(
            FixedSrcTy->getElementType(),
            ScalableDstTy->getElementCount().getKnownMinValue() / 8);
      }
      if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
        auto *Load = CGF.Builder.CreateLoad(Src);
        auto *UndefVec = llvm::UndefValue::get(ScalableDstTy);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
        llvm::Value *Result = CGF.Builder.CreateInsertVector(
            ScalableDstTy, UndefVec, Load, Zero, "cast.scalable");
        if (ScalableDstTy != Ty)
          Result = CGF.Builder.CreateBitCast(Result, Ty);
        return Result;
      }
    }
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp =
      CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
  CGF.Builder.CreateMemCpy(
      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
      Src.getAlignment().getAsAlign(),
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
                                         bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}
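// Editor's illustrative example (not part of the original file): storing a
// value of IR type { i32, float } through EmitAggregateStore emits two
// extractvalue/store pairs into struct GEPs of the destination instead of a
// single first-class aggregate store.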

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
                                             SrcSize.getFixedValue(), CGF);
    DstTy = Dst.getElementType();
  }

  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
  if (SrcPtrTy && DstPtrTy &&
      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
    Src = CGF.Builder.CreateAddrSpaceCast(Src, DstTy);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (isa<llvm::ScalableVectorType>(SrcTy) ||
      isa<llvm::ScalableVectorType>(DstTy) ||
      SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
    Dst = Dst.withElementType(SrcTy);
    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    CGF.Builder.CreateMemCpy(
        Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
        Tmp.getAlignment().getAsAlign(),
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue()));
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = addr.withElementType(CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(
        addr, CharUnits::fromQuantity(offset));
    addr = addr.withElementType(info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace
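// Editor's illustrative example (not part of the original file): for a Clang
// function whose aggregate return is lowered indirectly and whose single
// parameter is Expand-ed as `struct { int a, b; }` with no padding args, the
// mapping above assigns the sret pointer to IR argument 0 and the one Clang
// argument to IR arguments 1..2, so getIRArgs(0) returns {1, 2}.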

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  const auto &RI = FI.getReturnInfo();
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(FloatModeKind::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(FloatModeKind::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}
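// Editor's illustrative note (not part of the original file): on 32-bit x86
// Darwin, an Objective-C message returning `float` must go through
// objc_msgSend_fpret, which is the condition ReturnTypeUsesFPRet reports;
// ReturnTypeUsesFP2Ret covers the `_Complex long double` variant
// (objc_msgSend_fp2ret).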

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

1627 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1628 (void)Inserted;
1629 assert(Inserted && "Recursively being processed?");
1630
1631 llvm::Type *resultType = nullptr;
1632 const ABIArgInfo &retAI = FI.getReturnInfo();
1633 switch (retAI.getKind()) {
1634 case ABIArgInfo::Expand:
1636 llvm_unreachable("Invalid ABI kind for return argument");
1637
1638 case ABIArgInfo::Extend:
1639 case ABIArgInfo::Direct:
1640 resultType = retAI.getCoerceToType();
1641 break;
1642
1644 if (retAI.getInAllocaSRet()) {
1645 // sret things on win32 aren't void, they return the sret pointer.
1646 QualType ret = FI.getReturnType();
1647 unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
1648 resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);
1649 } else {
1650 resultType = llvm::Type::getVoidTy(getLLVMContext());
1651 }
1652 break;
1653
1655 case ABIArgInfo::Ignore:
1656 resultType = llvm::Type::getVoidTy(getLLVMContext());
1657 break;
1658
1660 resultType = retAI.getUnpaddedCoerceAndExpandType();
1661 break;
1662 }
1663
1664 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1665 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1666
1667 // Add type for sret argument.
1668 if (IRFunctionArgs.hasSRetArg()) {
1669 QualType Ret = FI.getReturnType();
1670 unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(Ret);
1671 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1672 llvm::PointerType::get(getLLVMContext(), AddressSpace);
1673 }
1674
1675 // Add type for inalloca argument.
1676 if (IRFunctionArgs.hasInallocaArg())
1677 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1678 llvm::PointerType::getUnqual(getLLVMContext());
1679
1680 // Add in all of the required arguments.
1681 unsigned ArgNo = 0;
1683 ie = it + FI.getNumRequiredArgs();
1684 for (; it != ie; ++it, ++ArgNo) {
1685 const ABIArgInfo &ArgInfo = it->info;
1686
1687 // Insert a padding type to ensure proper alignment.
1688 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1689 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1690 ArgInfo.getPaddingType();
1691
1692 unsigned FirstIRArg, NumIRArgs;
1693 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1694
1695 switch (ArgInfo.getKind()) {
1696 case ABIArgInfo::Ignore:
1698 assert(NumIRArgs == 0);
1699 break;
1700
1702 assert(NumIRArgs == 1);
1703 // indirect arguments are always on the stack, which is alloca addr space.
1704 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1705 getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());
1706 break;
1708 assert(NumIRArgs == 1);
1709 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1711 break;
1712 case ABIArgInfo::Extend:
1713 case ABIArgInfo::Direct: {
1714 // Fast-isel and the optimizer generally like scalar values better than
1715 // FCAs, so we flatten them if this is safe to do for this argument.
1716 llvm::Type *argType = ArgInfo.getCoerceToType();
1717 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1718 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1719 assert(NumIRArgs == st->getNumElements());
1720 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1721 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1722 } else {
1723 assert(NumIRArgs == 1);
1724 ArgTypes[FirstIRArg] = argType;
1725 }
1726 break;
1727 }
1728
1730 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1731 for (auto *EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1732 *ArgTypesIter++ = EltTy;
1733 }
1734 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1735 break;
1736 }
1737
1738 case ABIArgInfo::Expand:
1739 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1740 getExpandedTypes(it->type, ArgTypesIter);
1741 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1742 break;
1743 }
1744 }
1745
1746 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1747 assert(Erased && "Not in set?");
1748
1749 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1750}
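// Illustrative sketch (assuming a target ABI that returns large aggregates
// indirectly): for a declaration such as
//   struct Big { double d[8]; };
//   struct Big make(int n);
// the switch above selects a void result and ClangToLLVMArgMapping prepends
// the sret slot, so the eventual IR declaration reads along the lines of
//   declare void @make(ptr sret(%struct.Big) align 8, i32)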
1751
1752 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1753 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1754 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1755
1756 if (!isFuncTypeConvertible(FPT))
1757 return llvm::StructType::get(getLLVMContext());
1758
1759 return GetFunctionType(GD);
1760}
1761
1762 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1763 llvm::AttrBuilder &FuncAttrs,
1764 const FunctionProtoType *FPT) {
1765 if (!FPT)
1766 return;
1767
1768 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1769 FPT->isNothrow())
1770 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1771
1772 unsigned SMEBits = FPT->getAArch64SMEAttributes();
1773 if (SMEBits & FunctionType::SME_PStateSMEnabledMask)
1774 FuncAttrs.addAttribute("aarch64_pstate_sm_enabled");
1775 if (SMEBits & FunctionType::SME_PStateSMCompatibleMask)
1776 FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");
1777
1778 // ZA
1779 if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Preserves)
1780 FuncAttrs.addAttribute("aarch64_preserves_za");
1781 if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_In)
1782 FuncAttrs.addAttribute("aarch64_in_za");
1783 if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Out)
1784 FuncAttrs.addAttribute("aarch64_out_za");
1785 if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_InOut)
1786 FuncAttrs.addAttribute("aarch64_inout_za");
1787
1788 // ZT0
1789 if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Preserves)
1790 FuncAttrs.addAttribute("aarch64_preserves_zt0");
1791 if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_In)
1792 FuncAttrs.addAttribute("aarch64_in_zt0");
1793 if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Out)
1794 FuncAttrs.addAttribute("aarch64_out_zt0");
1795 if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_InOut)
1796 FuncAttrs.addAttribute("aarch64_inout_zt0");
1797}
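// For example (assuming an AArch64 target with SME), a prototype written as
//   void f(void) __arm_streaming __arm_inout("za");
// carries SME_PStateSMEnabledMask plus the ZA in/out state in SMEBits, so the
// checks above attach both "aarch64_pstate_sm_enabled" and "aarch64_inout_za".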
1798
1799static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs,
1800 const Decl *Callee) {
1801 if (!Callee)
1802 return;
1803
1804 SmallVector<StringRef, 4> Attrs;
1805
1806 for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
1807 AA->getAssumption().split(Attrs, ",");
1808
1809 if (!Attrs.empty())
1810 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1811 llvm::join(Attrs.begin(), Attrs.end(), ","));
1812}
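// A sketch of the mapping: a declaration in the scope of
//   #pragma omp assumes no_openmp
// gains an OMPAssumeAttr whose assumption string (here "omp_no_openmp") is
// split and re-joined above into the "llvm.assume" IR attribute.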
1813
1814 bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
1815 QualType ReturnType) const {
1816 // We can't just discard the return value for a record type with a
1817 // complex destructor or a non-trivially copyable type.
1818 if (const RecordType *RT =
1819 ReturnType.getCanonicalType()->getAs<RecordType>()) {
1820 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1821 return ClassDecl->hasTrivialDestructor();
1822 }
1823 return ReturnType.isTriviallyCopyableType(Context);
1824}
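// For instance, the result of 'std::string f();' may not be silently
// discarded here (std::string has a non-trivial destructor), whereas the
// result of a plain 'int g();' may; HasStrictReturn below builds on this.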
1825
1826 static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy,
1827 const Decl *TargetDecl) {
2828 // As-is, msan cannot tolerate a noundef mismatch between caller and
2829 // implementation. Mismatches are possible for e.g. indirect calls from a
2830 // C caller into C++. Such mismatches lead to confusing false reports. To
2831 // avoid an expensive workaround in msan, we enforce initialization even in
2832 // uncommon cases where it's allowed.
1833 if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
1834 return true;
1835 // C++ explicitly makes returning undefined values UB. C's rule only applies
1836 // to used values, so we never mark them noundef for now.
1837 if (!Module.getLangOpts().CPlusPlus)
1838 return false;
1839 if (TargetDecl) {
1840 if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
1841 if (FDecl->isExternC())
1842 return false;
1843 } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
1844 // Function pointer.
1845 if (VDecl->isExternC())
1846 return false;
1847 }
1848 }
1849
1850 // We don't want to be too aggressive with the return checking, unless
1851 // it's explicit in the code opts or we're using an appropriate sanitizer.
1852 // Try to respect what the programmer intended.
1853 return Module.getCodeGenOpts().StrictReturn ||
1854 !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
1855 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
1856}
1857
1858/// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the
1859/// requested denormal behavior, accounting for the overriding behavior of the
1860/// -f32 case.
1861static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode,
1862 llvm::DenormalMode FP32DenormalMode,
1863 llvm::AttrBuilder &FuncAttrs) {
1864 if (FPDenormalMode != llvm::DenormalMode::getDefault())
1865 FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());
1866
1867 if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
1868 FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
1869}
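// E.g., compiling with -fdenormal-fp-math=preserve-sign (and leaving the f32
// mode to follow the general one) yields
// "denormal-fp-math"="preserve-sign,preserve-sign" and no separate
// "denormal-fp-math-f32" attribute.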
1870
1871/// Add default attributes to a function, which have merge semantics under
1872/// -mlink-builtin-bitcode and should not simply overwrite any existing
1873/// attributes in the linked library.
1874static void
1875 addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts,
1876 llvm::AttrBuilder &FuncAttrs) {
1877 addDenormalModeAttrs(CodeGenOpts.FPDenormalMode, CodeGenOpts.FP32DenormalMode,
1878 FuncAttrs);
1879}
1880
1881 static void getTrivialDefaultFunctionAttributes(
1882 StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
1883 const LangOptions &LangOpts, bool AttrOnCallSite,
1884 llvm::AttrBuilder &FuncAttrs) {
1885 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1886 if (!HasOptnone) {
1887 if (CodeGenOpts.OptimizeSize)
1888 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1889 if (CodeGenOpts.OptimizeSize == 2)
1890 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1891 }
1892
1893 if (CodeGenOpts.DisableRedZone)
1894 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1895 if (CodeGenOpts.IndirectTlsSegRefs)
1896 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1897 if (CodeGenOpts.NoImplicitFloat)
1898 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1899
1900 if (AttrOnCallSite) {
1901 // Attributes that should go on the call site only.
1902 // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking
1903 // the -fno-builtin-foo list.
1904 if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
1905 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1906 if (!CodeGenOpts.TrapFuncName.empty())
1907 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1908 } else {
1909 switch (CodeGenOpts.getFramePointer()) {
1910 case CodeGenOptions::FramePointerKind::None:
1911 // This is the default behavior.
1912 break;
1913 case CodeGenOptions::FramePointerKind::NonLeaf:
1914 case CodeGenOptions::FramePointerKind::All:
1915 FuncAttrs.addAttribute("frame-pointer",
1916 CodeGenOptions::getFramePointerKindName(
1917 CodeGenOpts.getFramePointer()));
1918 }
1919
1920 if (CodeGenOpts.LessPreciseFPMAD)
1921 FuncAttrs.addAttribute("less-precise-fpmad", "true");
1922
1923 if (CodeGenOpts.NullPointerIsValid)
1924 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1925
1926 if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore)
1927 FuncAttrs.addAttribute("no-trapping-math", "true");
1928
1929 // TODO: Are these all needed?
1930 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1931 if (LangOpts.NoHonorInfs)
1932 FuncAttrs.addAttribute("no-infs-fp-math", "true");
1933 if (LangOpts.NoHonorNaNs)
1934 FuncAttrs.addAttribute("no-nans-fp-math", "true");
1935 if (LangOpts.ApproxFunc)
1936 FuncAttrs.addAttribute("approx-func-fp-math", "true");
1937 if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
1938 LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
1939 (LangOpts.getDefaultFPContractMode() ==
1940 LangOptions::FPModeKind::FPM_Fast ||
1941 LangOpts.getDefaultFPContractMode() ==
1942 LangOptions::FPModeKind::FPM_FastHonorPragmas))
1943 FuncAttrs.addAttribute("unsafe-fp-math", "true");
1944 if (CodeGenOpts.SoftFloat)
1945 FuncAttrs.addAttribute("use-soft-float", "true");
1946 FuncAttrs.addAttribute("stack-protector-buffer-size",
1947 llvm::utostr(CodeGenOpts.SSPBufferSize));
1948 if (LangOpts.NoSignedZero)
1949 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1950
1951 // TODO: Reciprocal estimate codegen options should apply to instructions?
1952 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1953 if (!Recips.empty())
1954 FuncAttrs.addAttribute("reciprocal-estimates",
1955 llvm::join(Recips, ","));
1956
1957 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1958 CodeGenOpts.PreferVectorWidth != "none")
1959 FuncAttrs.addAttribute("prefer-vector-width",
1960 CodeGenOpts.PreferVectorWidth);
1961
1962 if (CodeGenOpts.StackRealignment)
1963 FuncAttrs.addAttribute("stackrealign");
1964 if (CodeGenOpts.Backchain)
1965 FuncAttrs.addAttribute("backchain");
1966 if (CodeGenOpts.EnableSegmentedStacks)
1967 FuncAttrs.addAttribute("split-stack");
1968
1969 if (CodeGenOpts.SpeculativeLoadHardening)
1970 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1971
1972 // Add zero-call-used-regs attribute.
1973 switch (CodeGenOpts.getZeroCallUsedRegs()) {
1974 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
1975 FuncAttrs.removeAttribute("zero-call-used-regs");
1976 break;
1977 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
1978 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
1979 break;
1980 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
1981 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
1982 break;
1983 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
1984 FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
1985 break;
1986 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
1987 FuncAttrs.addAttribute("zero-call-used-regs", "used");
1988 break;
1989 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
1990 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
1991 break;
1992 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
1993 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
1994 break;
1995 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
1996 FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
1997 break;
1998 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
1999 FuncAttrs.addAttribute("zero-call-used-regs", "all");
2000 break;
2001 }
2002 }
2003
2004 if (LangOpts.assumeFunctionsAreConvergent()) {
2005 // Conservatively, mark all functions and calls in CUDA and OpenCL as
2006 // convergent (meaning, they may call an intrinsically convergent op, such
2007 // as __syncthreads() / barrier(), and so can't have certain optimizations
2008 // applied around them). LLVM will remove this attribute where it safely
2009 // can.
2010 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2011 }
2012
2013 // TODO: NoUnwind attribute should be added for other GPU modes HIP,
2014 // OpenMP offload. AFAIK, neither of them support exceptions in device code.
2015 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
2016 LangOpts.SYCLIsDevice) {
2017 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2018 }
2019
2020 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
2021 StringRef Var, Value;
2022 std::tie(Var, Value) = Attr.split('=');
2023 FuncAttrs.addAttribute(Var, Value);
2024 }
2025}
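// As a concrete example (assuming no pragma adjusts the FP state), a file
// built with -ffast-math sets NoHonorInfs, NoHonorNaNs, ApproxFunc,
// AllowFPReassoc, AllowRecip and NoSignedZero, so a definition picks up
// "no-infs-fp-math", "no-nans-fp-math", "approx-func-fp-math",
// "no-signed-zeros-fp-math" and "unsafe-fp-math", all ="true", from the
// non-call-site branch above.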
2026
2027/// Merges `target-features` from \p TargetOpts and \p F, and sets the result
2028/// in \p FuncAttr:
2029/// * features from \p F are always kept
2030/// * a feature from \p TargetOpts is kept only if neither it nor its opposite
2031/// is present in \p F
2032static void
2033 overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr,
2034 const llvm::Function &F,
2035 const TargetOptions &TargetOpts) {
2036 auto FFeatures = F.getFnAttribute("target-features");
2037
2038 llvm::StringSet<> MergedNames;
2039 SmallVector<StringRef> MergedFeatures;
2040 MergedFeatures.reserve(TargetOpts.Features.size());
2041
2042 auto AddUnmergedFeatures = [&](auto &&FeatureRange) {
2043 for (StringRef Feature : FeatureRange) {
2044 if (Feature.empty())
2045 continue;
2046 assert(Feature[0] == '+' || Feature[0] == '-');
2047 StringRef Name = Feature.drop_front(1);
2048 bool Merged = !MergedNames.insert(Name).second;
2049 if (!Merged)
2050 MergedFeatures.push_back(Feature);
2051 }
2052 };
2053
2054 if (FFeatures.isValid())
2055 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ','));
2056 AddUnmergedFeatures(TargetOpts.Features);
2057
2058 if (!MergedFeatures.empty()) {
2059 llvm::sort(MergedFeatures);
2060 FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));
2061 }
2062}
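// Worked example: if F already carries "target-features"="+avx" and
// TargetOpts.Features is {"-avx", "+sse4.2"}, then "+avx" is kept (features
// from F win), "-avx" is dropped because the name "avx" was already merged,
// and "+sse4.2" is added, giving "target-features"="+avx,+sse4.2".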
2063
2064 void CodeGen::mergeDefaultFunctionDefinitionAttributes(
2065 llvm::Function &F, const CodeGenOptions &CodeGenOpts,
2066 const LangOptions &LangOpts, const TargetOptions &TargetOpts,
2067 bool WillInternalize) {
2068
2069 llvm::AttrBuilder FuncAttrs(F.getContext());
2070 // Here we only extract the options that are relevant compared to the version
2071 // from GetCPUAndFeaturesAttributes.
2072 if (!TargetOpts.CPU.empty())
2073 FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
2074 if (!TargetOpts.TuneCPU.empty())
2075 FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);
2076
2077 ::getTrivialDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
2078 CodeGenOpts, LangOpts,
2079 /*AttrOnCallSite=*/false, FuncAttrs);
2080
2081 if (!WillInternalize && F.isInterposable()) {
2082 // Do not promote "dynamic" denormal-fp-math to this translation unit's
2083 // setting for weak functions that won't be internalized. The user has no
2084 // real control for how builtin bitcode is linked, so we shouldn't assume
2085 // later copies will use a consistent mode.
2086 F.addFnAttrs(FuncAttrs);
2087 return;
2088 }
2089
2090 llvm::AttributeMask AttrsToRemove;
2091
2092 llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
2093 llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
2094 llvm::DenormalMode Merged =
2095 CodeGenOpts.FPDenormalMode.mergeCalleeMode(DenormModeToMerge);
2096 llvm::DenormalMode MergedF32 = CodeGenOpts.FP32DenormalMode;
2097
2098 if (DenormModeToMergeF32.isValid()) {
2099 MergedF32 =
2100 CodeGenOpts.FP32DenormalMode.mergeCalleeMode(DenormModeToMergeF32);
2101 }
2102
2103 if (Merged == llvm::DenormalMode::getDefault()) {
2104 AttrsToRemove.addAttribute("denormal-fp-math");
2105 } else if (Merged != DenormModeToMerge) {
2106 // Overwrite existing attribute
2107 FuncAttrs.addAttribute("denormal-fp-math",
2108 CodeGenOpts.FPDenormalMode.str());
2109 }
2110
2111 if (MergedF32 == llvm::DenormalMode::getDefault()) {
2112 AttrsToRemove.addAttribute("denormal-fp-math-f32");
2113 } else if (MergedF32 != DenormModeToMergeF32) {
2114 // Overwrite existing attribute
2115 FuncAttrs.addAttribute("denormal-fp-math-f32",
2116 CodeGenOpts.FP32DenormalMode.str());
2117 }
2118
2119 F.removeFnAttrs(AttrsToRemove);
2120 addDenormalModeAttrs(Merged, MergedF32, FuncAttrs);
2121
2122 overrideFunctionFeaturesWithTargetFeatures(FuncAttrs, F, TargetOpts);
2123
2124 F.addFnAttrs(FuncAttrs);
2125}
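// Sketch of the denormal merge under -mlink-builtin-bitcode: a linked callee
// carrying "denormal-fp-math"="dynamic,dynamic" adopts this TU's mode (say,
// preserve-sign from -fdenormal-fp-math=preserve-sign), while a merged mode
// equal to the IEEE default has its attribute removed rather than rewritten.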
2126
2127void CodeGenModule::getTrivialDefaultFunctionAttributes(
2128 StringRef Name, bool HasOptnone, bool AttrOnCallSite,
2129 llvm::AttrBuilder &FuncAttrs) {
2130 ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),
2131 getLangOpts(), AttrOnCallSite,
2132 FuncAttrs);
2133}
2134
2135void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2136 bool HasOptnone,
2137 bool AttrOnCallSite,
2138 llvm::AttrBuilder &FuncAttrs) {
2139 getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
2140 FuncAttrs);
2141 // If we're just getting the default, get the default values for mergeable
2142 // attributes.
2143 if (!AttrOnCallSite)
2144 addMergableDefaultFunctionAttributes(CodeGenOpts, FuncAttrs);
2145}
2146
2147 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
2148 llvm::AttrBuilder &attrs) {
2149 getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
2150 /*for call*/ false, attrs);
2151 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
2152}
2153
2154static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
2155 const LangOptions &LangOpts,
2156 const NoBuiltinAttr *NBA = nullptr) {
2157 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2158 SmallString<32> AttributeName;
2159 AttributeName += "no-builtin-";
2160 AttributeName += BuiltinName;
2161 FuncAttrs.addAttribute(AttributeName);
2162 };
2163
2164 // First, handle the language options passed through -fno-builtin.
2165 if (LangOpts.NoBuiltin) {
2166 // -fno-builtin disables them all.
2167 FuncAttrs.addAttribute("no-builtins");
2168 return;
2169 }
2170
2171 // Then, add attributes for builtins specified through -fno-builtin-<name>.
2172 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
2173
2174 // Now, let's check the __attribute__((no_builtin("...")) attribute added to
2175 // the source.
2176 if (!NBA)
2177 return;
2178
2179 // If there is a wildcard in the builtin names specified through the
2180 // attribute, disable them all.
2181 if (llvm::is_contained(NBA->builtinNames(), "*")) {
2182 FuncAttrs.addAttribute("no-builtins");
2183 return;
2184 }
2185
2186 // And last, add the rest of the builtin names.
2187 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2188}
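// Mapping sketch: -fno-builtin yields the blanket "no-builtins" attribute,
// -fno-builtin-memcpy yields "no-builtin-memcpy", and
//   __attribute__((no_builtin("memcpy"))) void f(void);
// yields the same per-function attribute, with a "*" wildcard in the
// attribute's builtin names collapsing to "no-builtins" as well.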
2189
2190 static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
2191 const llvm::DataLayout &DL, const ABIArgInfo &AI,
2192 bool CheckCoerce = true) {
2193 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
2194 if (AI.getKind() == ABIArgInfo::Indirect ||
2195 AI.getKind() == ABIArgInfo::IndirectAliased)
2196 return true;
2197 if (AI.getKind() == ABIArgInfo::Extend)
2198 return true;
2199 if (!DL.typeSizeEqualsStoreSize(Ty))
2200 // TODO: This will result in a modest amount of values not marked noundef
2201 // when they could be. We care about values that *invisibly* contain undef
2202 // bits from the perspective of LLVM IR.
2203 return false;
2204 if (CheckCoerce && AI.canHaveCoerceToType()) {
2205 llvm::Type *CoerceTy = AI.getCoerceToType();
2206 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2207 DL.getTypeSizeInBits(Ty)))
2208 // If we're coercing to a type with a greater size than the canonical one,
2209 // we're introducing new undef bits.
2210 // Coercing to a type of smaller or equal size is ok, as we know that
2211 // there's no internal padding (typeSizeEqualsStoreSize).
2212 return false;
2213 }
2214 if (QTy->isBitIntType())
2215 return true;
2216 if (QTy->isReferenceType())
2217 return true;
2218 if (QTy->isNullPtrType())
2219 return false;
2220 if (QTy->isMemberPointerType())
2221 // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
2222 // now, never mark them.
2223 return false;
2224 if (QTy->isScalarType()) {
2225 if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
2226 return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
2227 return true;
2228 }
2229 if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
2230 return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
2231 if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2232 return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
2233 if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2234 return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
2235
2236 // TODO: Some structs may be `noundef`, in specific situations.
2237 return false;
2238}
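// Illustrative outcomes (assuming a conventional data layout): an 'int' or
// 'int *' parameter qualifies for noundef, references and _BitInt always do,
// while nullptr_t, member pointers, and aggregates currently never do, even
// when every field would qualify on its own.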
2239
2240/// Check if the argument of a function has maybe_undef attribute.
2241static bool IsArgumentMaybeUndef(const Decl *TargetDecl,
2242 unsigned NumRequiredArgs, unsigned ArgNo) {
2243 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2244 if (!FD)
2245 return false;
2246
2247 // Assume variadic arguments do not have maybe_undef attribute.
2248 if (ArgNo >= NumRequiredArgs)
2249 return false;
2250
2251 // Check if argument has maybe_undef attribute.
2252 if (ArgNo < FD->getNumParams()) {
2253 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2254 if (Param && Param->hasAttr<MaybeUndefAttr>())
2255 return true;
2256 }
2257
2258 return false;
2259}
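// E.g., given
//   void f(int x, __attribute__((maybe_undef)) int y);
// this returns true only for ArgNo == 1; the call-site lowering then freezes
// that argument rather than tagging it noundef.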
2260
2261/// Test if it's legal to apply nofpclass for the given parameter type and it's
2262/// lowered IR type.
2263static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType,
2264 bool IsReturn) {
2265 // Should only apply to FP types in the source, not ABI promoted.
2266 if (!ParamType->hasFloatingRepresentation())
2267 return false;
2268
2269 // The promoted-to IR type also needs to support nofpclass.
2270 llvm::Type *IRTy = AI.getCoerceToType();
2271 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2272 return true;
2273
2274 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2275 return !IsReturn && AI.getCanBeFlattened() &&
2276 llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
2277 return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
2278 });
2279 }
2280
2281 return false;
2282}
2283
2284/// Return the nofpclass mask that can be applied to floating-point parameters.
2285static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) {
2286 llvm::FPClassTest Mask = llvm::fcNone;
2287 if (LangOpts.NoHonorInfs)
2288 Mask |= llvm::fcInf;
2289 if (LangOpts.NoHonorNaNs)
2290 Mask |= llvm::fcNan;
2291 return Mask;
2292}
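// For instance, with -ffinite-math-only both NoHonorInfs and NoHonorNaNs are
// set, so eligible floating-point parameters and returns end up annotated
// nofpclass(nan inf) via this mask.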
2293
2294 void CodeGenModule::AdjustMemoryAttribute(StringRef Name,
2295 CGCalleeInfo CalleeInfo,
2296 llvm::AttributeList &Attrs) {
2297 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2298 Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory);
2299 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
2300 getLLVMContext(), llvm::MemoryEffects::writeOnly());
2301 Attrs = Attrs.addFnAttribute(getLLVMContext(), MemoryAttr);
2302 }
2303}
2304
2305/// Construct the IR attribute list of a function or call.
2306///
2307/// When adding an attribute, please consider where it should be handled:
2308///
2309/// - getDefaultFunctionAttributes is for attributes that are essentially
2310/// part of the global target configuration (but perhaps can be
2311/// overridden on a per-function basis). Adding attributes there
2312/// will cause them to also be set in frontends that build on Clang's
2313/// target-configuration logic, as well as for code defined in library
2314/// modules such as CUDA's libdevice.
2315///
2316/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
2317/// and adds declaration-specific, convention-specific, and
2318/// frontend-specific logic. The last is of particular importance:
2319/// attributes that restrict how the frontend generates code must be
2320/// added here rather than getDefaultFunctionAttributes.
2321///
2322 void CodeGenModule::ConstructAttributeList(StringRef Name,
2323 const CGFunctionInfo &FI,
2324 CGCalleeInfo CalleeInfo,
2325 llvm::AttributeList &AttrList,
2326 unsigned &CallingConv,
2327 bool AttrOnCallSite, bool IsThunk) {
2328 llvm::AttrBuilder FuncAttrs(getLLVMContext());
2329 llvm::AttrBuilder RetAttrs(getLLVMContext());
2330
2331 // Collect function IR attributes from the CC lowering.
2332 // We'll collect the parameter and result attributes later.
2333 CallingConv = FI.getEffectiveCallingConvention();
2334 if (FI.isNoReturn())
2335 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2336 if (FI.isCmseNSCall())
2337 FuncAttrs.addAttribute("cmse_nonsecure_call");
2338
2339 // Collect function IR attributes from the callee prototype if we have one.
2340 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
2341 CalleeInfo.getCalleeFunctionProtoType());
2342
2343 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
2344
2345 // Attach assumption attributes to the declaration. If this is a call
2346 // site, attach assumptions from the caller to the call as well.
2347 AddAttributesFromOMPAssumes(FuncAttrs, TargetDecl);
2348
2349 bool HasOptnone = false;
2350 // The NoBuiltinAttr attached to the target FunctionDecl.
2351 const NoBuiltinAttr *NBA = nullptr;
2352
2353 // Some ABIs may result in additional accesses to arguments that may
2354 // otherwise not be present.
2355 auto AddPotentialArgAccess = [&]() {
2356 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2357 if (A.isValid())
2358 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2359 llvm::MemoryEffects::argMemOnly());
2360 };
2361
2362 // Collect function IR attributes based on declaration-specific
2363 // information.
2364 // FIXME: handle sseregparm someday...
2365 if (TargetDecl) {
2366 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2367 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2368 if (TargetDecl->hasAttr<NoThrowAttr>())
2369 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2370 if (TargetDecl->hasAttr<NoReturnAttr>())
2371 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2372 if (TargetDecl->hasAttr<ColdAttr>())
2373 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2374 if (TargetDecl->hasAttr<HotAttr>())
2375 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2376 if (TargetDecl->hasAttr<NoDuplicateAttr>())
2377 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2378 if (TargetDecl->hasAttr<ConvergentAttr>())
2379 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2380
2381 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2382 AddAttributesFromFunctionProtoType(
2383 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2384 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2385 // A sane operator new returns a non-aliasing pointer.
2386 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2387 if (getCodeGenOpts().AssumeSaneOperatorNew &&
2388 (Kind == OO_New || Kind == OO_Array_New))
2389 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2390 }
2391 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
2392 const bool IsVirtualCall = MD && MD->isVirtual();
2393 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
2394 // virtual function. These attributes are not inherited by overloads.
2395 if (!(AttrOnCallSite && IsVirtualCall)) {
2396 if (Fn->isNoReturn())
2397 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2398 NBA = Fn->getAttr<NoBuiltinAttr>();
2399 }
2400 }
2401
2402 if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
2403 // Only place nomerge attribute on call sites, never functions. This
2404 // allows it to work on indirect virtual function calls.
2405 if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2406 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2407 }
2408
2409 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
2410 if (TargetDecl->hasAttr<ConstAttr>()) {
2411 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2412 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2413 // gcc specifies that 'const' functions have greater restrictions than
2414 // 'pure' functions, so they also cannot have infinite loops.
2415 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2416 } else if (TargetDecl->hasAttr<PureAttr>()) {
2417 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2418 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2419 // gcc specifies that 'pure' functions cannot have infinite loops.
2420 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2421 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2422 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2423 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2424 }
2425 if (TargetDecl->hasAttr<RestrictAttr>())
2426 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2427 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2428 !CodeGenOpts.NullPointerIsValid)
2429 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2430 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2431 FuncAttrs.addAttribute("no_caller_saved_registers");
2432 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2433 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2434 if (TargetDecl->hasAttr<LeafAttr>())
2435 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2436
2437 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2438 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2439 std::optional<unsigned> NumElemsParam;
2440 if (AllocSize->getNumElemsParam().isValid())
2441 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2442 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2443 NumElemsParam);
2444 }
2445
2446 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2447 if (getLangOpts().OpenCLVersion <= 120) {
2448 // OpenCL v1.2 Work groups are always uniform
2449 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2450 } else {
2451 // OpenCL v2.0 Work groups may or may not be uniform.
2452 // The '-cl-uniform-work-group-size' compile option gives the
2453 // compiler a hint that the global work-size is a multiple of
2454 // the work-group size specified to clEnqueueNDRangeKernel
2455 // (i.e. work groups are uniform).
2456 FuncAttrs.addAttribute(
2457 "uniform-work-group-size",
2458 llvm::toStringRef(getLangOpts().OffloadUniformBlock));
2459 }
2460 }
2461
2462 if (TargetDecl->hasAttr<CUDAGlobalAttr>() &&
2463 getLangOpts().OffloadUniformBlock)
2464 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2465
2466 if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>())
2467 FuncAttrs.addAttribute("aarch64_pstate_sm_body");
2468 }
2469
2470 // Attach "no-builtins" attributes to:
2471 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2472 // * definitions: "no-builtins" or "no-builtin-<name>" only.
2473 // The attributes can come from:
2474 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2475 // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2476 addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2477
2478 // Collect function IR attributes based on global settings.
2479 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2480
2481 // Override some default IR attributes based on declaration-specific
2482 // information.
2483 if (TargetDecl) {
2484 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2485 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2486 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2487 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2488 if (TargetDecl->hasAttr<NoSplitStackAttr>())
2489 FuncAttrs.removeAttribute("split-stack");
2490 if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
2491 // A function "__attribute__((...))" overrides the command-line flag.
2492 auto Kind =
2493 TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2494 FuncAttrs.removeAttribute("zero-call-used-regs");
2495 FuncAttrs.addAttribute(
2496 "zero-call-used-regs",
2497 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2498 }
2499
2500 // Add NonLazyBind attribute to function declarations when -fno-plt
2501 // is used.
2502 // FIXME: what if we just haven't processed the function definition
2503 // yet, or if it's an external definition like C99 inline?
2504 if (CodeGenOpts.NoPLT) {
2505 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2506 if (!Fn->isDefined() && !AttrOnCallSite) {
2507 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2508 }
2509 }
2510 }
2511 }
2512
2513 // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
2514 // functions with -funique-internal-linkage-names.
2515 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2516 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2517 if (!FD->isExternallyVisible())
2518 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2519 "selected");
2520 }
2521 }
2522
2523 // Collect non-call-site function IR attributes from declaration-specific
2524 // information.
2525 if (!AttrOnCallSite) {
2526 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2527 FuncAttrs.addAttribute("cmse_nonsecure_entry");
2528
2529 // Whether tail calls are enabled.
2530 auto shouldDisableTailCalls = [&] {
2531 // Should this be honored in getDefaultFunctionAttributes?
2532 if (CodeGenOpts.DisableTailCalls)
2533 return true;
2534
2535 if (!TargetDecl)
2536 return false;
2537
2538 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2539 TargetDecl->hasAttr<AnyX86InterruptAttr>())
2540 return true;
2541
2542 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2543 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2544 if (!BD->doesNotEscape())
2545 return true;
2546 }
2547
2548 return false;
2549 };
2550 if (shouldDisableTailCalls())
2551 FuncAttrs.addAttribute("disable-tail-calls", "true");
2552
2553 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
2554 // handles these separately to set them based on the global defaults.
2555 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2556 }
2557
2558 // Collect attributes from arguments and return values.
2559 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2560
2561 QualType RetTy = FI.getReturnType();
2562 const ABIArgInfo &RetAI = FI.getReturnInfo();
2563 const llvm::DataLayout &DL = getDataLayout();
2564
2565 // Determine if the return type could be partially undef
2566 if (CodeGenOpts.EnableNoundefAttrs &&
2567 HasStrictReturn(*this, RetTy, TargetDecl)) {
2568 if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2569 DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
2570 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2571 }
2572
2573 switch (RetAI.getKind()) {
2574 case ABIArgInfo::Extend:
2575 if (RetAI.isSignExt())
2576 RetAttrs.addAttribute(llvm::Attribute::SExt);
2577 else
2578 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2579 [[fallthrough]];
2580 case ABIArgInfo::Direct:
2581 if (RetAI.getInReg())
2582 RetAttrs.addAttribute(llvm::Attribute::InReg);
2583
2584 if (canApplyNoFPClass(RetAI, RetTy, true))
2585 RetAttrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
2586
2587 break;
2588 case ABIArgInfo::Ignore:
2589 break;
2590
2591 case ABIArgInfo::InAlloca:
2592 case ABIArgInfo::Indirect: {
2593 // inalloca and sret disable readnone and readonly
2594 AddPotentialArgAccess();
2595 break;
2596 }
2597
2598 case ABIArgInfo::CoerceAndExpand:
2599 break;
2600
2601 case ABIArgInfo::Expand:
2602 case ABIArgInfo::IndirectAliased:
2603 llvm_unreachable("Invalid ABI kind for return argument");
2604 }
2605
2606 if (!IsThunk) {
2607 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2608 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2609 QualType PTy = RefTy->getPointeeType();
2610 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2611 RetAttrs.addDereferenceableAttr(
2612 getMinimumObjectSize(PTy).getQuantity());
2613 if (getTypes().getTargetAddressSpace(PTy) == 0 &&
2614 !CodeGenOpts.NullPointerIsValid)
2615 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2616 if (PTy->isObjectType()) {
2617 llvm::Align Alignment =
2618 getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2619 RetAttrs.addAlignmentAttr(Alignment);
2620 }
2621 }
2622 }
2623
2624 bool hasUsedSRet = false;
2625 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2626
2627 // Attach attributes to sret.
2628 if (IRFunctionArgs.hasSRetArg()) {
2629 llvm::AttrBuilder SRETAttrs(getLLVMContext());
2630 SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2631 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2632 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2633 hasUsedSRet = true;
2634 if (RetAI.getInReg())
2635 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2636 SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2637 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2638 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2639 }
2640
2641 // Attach attributes to inalloca argument.
2642 if (IRFunctionArgs.hasInallocaArg()) {
2643 llvm::AttrBuilder Attrs(getLLVMContext());
2644 Attrs.addInAllocaAttr(FI.getArgStruct());
2645 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2646 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2647 }
2648
2649 // Apply `nonnull`, `dereferencable(N)` and `align N` to the `this` argument,
2650 // unless this is a thunk function.
2651 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2652 if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2653 !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
2654 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2655
2656 assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2657
2658 llvm::AttrBuilder Attrs(getLLVMContext());
2659
2660 QualType ThisTy =
2661 FI.arg_begin()->type.getTypePtr()->getPointeeType();
2662
2663 if (!CodeGenOpts.NullPointerIsValid &&
2664 getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2665 Attrs.addAttribute(llvm::Attribute::NonNull);
2666 Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
2667 } else {
2668 // FIXME dereferenceable should be correct here, regardless of
2669 // NullPointerIsValid. However, dereferenceable currently does not always
2670 // respect NullPointerIsValid and may imply nonnull and break the program.
2671 // See https://reviews.llvm.org/D66618 for discussions.
2672 Attrs.addDereferenceableOrNullAttr(
2673 getMinimumObjectSize(
2674 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2675 .getQuantity());
2676 }
2677
2678 llvm::Align Alignment =
2679 getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr,
2680 /*TBAAInfo=*/nullptr, /*forPointeeType=*/true)
2681 .getAsAlign();
2682 Attrs.addAlignmentAttr(Alignment);
2683
2684 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
2685 }
2686
2687 unsigned ArgNo = 0;
2688 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2689 E = FI.arg_end();
2690 I != E; ++I, ++ArgNo) {
2691 QualType ParamType = I->type;
2692 const ABIArgInfo &AI = I->info;
2693 llvm::AttrBuilder Attrs(getLLVMContext());
2694
2695 // Add attribute for padding argument, if necessary.
2696 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2697 if (AI.getPaddingInReg()) {
2698 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2699 llvm::AttributeSet::get(
2700 getLLVMContext(),
2701 llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));
2702 }
2703 }
2704
2705 // Decide whether the argument we're handling could be partially undef
2706 if (CodeGenOpts.EnableNoundefAttrs &&
2707 DetermineNoUndef(ParamType, getTypes(), DL, AI)) {
2708 Attrs.addAttribute(llvm::Attribute::NoUndef);
2709 }
2710
2711 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2712 // have the corresponding parameter variable. It doesn't make
2713 // sense to do it here because parameters are so messed up.
2714 switch (AI.getKind()) {
2715 case ABIArgInfo::Extend:
2716 if (AI.isSignExt())
2717 Attrs.addAttribute(llvm::Attribute::SExt);
2718 else
2719 Attrs.addAttribute(llvm::Attribute::ZExt);
2720 [[fallthrough]];
2721 case ABIArgInfo::Direct:
2722 if (ArgNo == 0 && FI.isChainCall())
2723 Attrs.addAttribute(llvm::Attribute::Nest);
2724 else if (AI.getInReg())
2725 Attrs.addAttribute(llvm::Attribute::InReg);
2726 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
2727
2728 if (canApplyNoFPClass(AI, ParamType, false))
2729 Attrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
2730 break;
2731 case ABIArgInfo::Indirect: {
2732 if (AI.getInReg())
2733 Attrs.addAttribute(llvm::Attribute::InReg);
2734
2735 if (AI.getIndirectByVal())
2736 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2737
2738 auto *Decl = ParamType->getAsRecordDecl();
2739 if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2740 Decl->getArgPassingRestrictions() ==
2741 RecordArgPassingKind::CanNeverPassInRegs)
2742 // When calling the function, the pointer passed in will be the only
2743 // reference to the underlying object. Mark it accordingly.
2744 Attrs.addAttribute(llvm::Attribute::NoAlias);
2745
2746 // TODO: We could add the byref attribute if not byval, but it would
2747 // require updating many testcases.
2748
2749 CharUnits Align = AI.getIndirectAlign();
2750
2751 // In a byval argument, it is important that the required
2752 // alignment of the type is honored, as LLVM might be creating a
2753 // *new* stack object, and needs to know what alignment to give
2754 // it. (Sometimes it can deduce a sensible alignment on its own,
2755 // but not if clang decides it must emit a packed struct, or the
2756 // user specifies increased alignment requirements.)
2757 //
2758 // This is different from indirect *not* byval, where the object
2759 // exists already, and the align attribute is purely
2760 // informative.
2761 assert(!Align.isZero());
2762
2763 // For now, only add this when we have a byval argument.
2764 // TODO: be less lazy about updating test cases.
2765 if (AI.getIndirectByVal())
2766 Attrs.addAlignmentAttr(Align.getQuantity());
2767
2768 // byval disables readnone and readonly.
2769 AddPotentialArgAccess();
2770 break;
2771 }
2772 case ABIArgInfo::IndirectAliased: {
2773 CharUnits Align = AI.getIndirectAlign();
2774 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2775 Attrs.addAlignmentAttr(Align.getQuantity());
2776 break;
2777 }
2778 case ABIArgInfo::Ignore:
2779 case ABIArgInfo::Expand:
2780 case ABIArgInfo::CoerceAndExpand:
2781 break;
2782
2783 case ABIArgInfo::InAlloca:
2784 // inalloca disables readnone and readonly.
2785 AddPotentialArgAccess();
2786 continue;
2787 }
2788
2789 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2790 QualType PTy = RefTy->getPointeeType();
2791 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2792 Attrs.addDereferenceableAttr(
2793 getMinimumObjectSize(PTy).getQuantity());
2794 if (getTypes().getTargetAddressSpace(PTy) == 0 &&
2795 !CodeGenOpts.NullPointerIsValid)
2796 Attrs.addAttribute(llvm::Attribute::NonNull);
2797 if (PTy->isObjectType()) {
2798 llvm::Align Alignment =
2799 getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2800 Attrs.addAlignmentAttr(Alignment);
2801 }
2802 }
2803
2804 // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types:
2805 // > For arguments to a __kernel function declared to be a pointer to a
2806 // > data type, the OpenCL compiler can assume that the pointee is always
2807 // > appropriately aligned as required by the data type.
2808 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&
2809 ParamType->isPointerType()) {
2810 QualType PTy = ParamType->getPointeeType();
2811 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2812 llvm::Align Alignment =
2813 getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2814 Attrs.addAlignmentAttr(Alignment);
2815 }
2816 }
2817
2818 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2819 case ParameterABI::Ordinary:
2820 break;
2821
2822 case ParameterABI::SwiftIndirectResult: {
2823 // Add 'sret' if we haven't already used it for something, but
2824 // only if the result is void.
2825 if (!hasUsedSRet && RetTy->isVoidType()) {
2826 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2827 hasUsedSRet = true;
2828 }
2829
2830 // Add 'noalias' in either case.
2831 Attrs.addAttribute(llvm::Attribute::NoAlias);
2832
2833 // Add 'dereferenceable' and 'alignment'.
2834 auto PTy = ParamType->getPointeeType();
2835 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2836 auto info = getContext().getTypeInfoInChars(PTy);
2837 Attrs.addDereferenceableAttr(info.Width.getQuantity());
2838 Attrs.addAlignmentAttr(info.Align.getAsAlign());
2839 }
2840 break;
2841 }
2842
2843 case ParameterABI::SwiftErrorResult:
2844 Attrs.addAttribute(llvm::Attribute::SwiftError);
2845 break;
2846
2847 case ParameterABI::SwiftContext:
2848 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2849 break;
2850
2851 case ParameterABI::SwiftAsyncContext:
2852 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
2853 break;
2854 }
2855
2856 if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2857 Attrs.addAttribute(llvm::Attribute::NoCapture);
2858
2859 if (Attrs.hasAttributes()) {
2860 unsigned FirstIRArg, NumIRArgs;
2861 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2862 for (unsigned i = 0; i < NumIRArgs; i++)
2863 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
2864 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs));
2865 }
2866 }
2867 assert(ArgNo == FI.arg_size());
2868
2869 AttrList = llvm::AttributeList::get(
2870 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2871 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2872}
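// A minimal caller-side sketch (CGM, FI and Fn assumed in scope), mirroring
// how function definitions typically consume this routine:
//   llvm::AttributeList AttrList;
//   unsigned CallingConv;
//   CGM.ConstructAttributeList(Fn->getName(), FI, CGCalleeInfo(), AttrList,
//                              CallingConv, /*AttrOnCallSite=*/false,
//                              /*IsThunk=*/false);
//   Fn->setAttributes(AttrList);
//   Fn->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));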
2873
2874/// An argument came in as a promoted argument; demote it back to its
2875/// declared type.
2876static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2877 const VarDecl *var,
2878 llvm::Value *value) {
2879 llvm::Type *varType = CGF.ConvertType(var->getType());
2880
2881 // This can happen with promotions that actually don't change the
2882 // underlying type, like the enum promotions.
2883 if (value->getType() == varType) return value;
2884
2885 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2886 && "unexpected promotion type");
2887
2888 if (isa<llvm::IntegerType>(varType))
2889 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2890
2891 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2892}
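// E.g., in a K&R-style definition 'short f(x) short x; { ... }' the argument
// travels as 'int' per the default argument promotions and is narrowed back
// with the trunc above; a promoted 'float' (passed as 'double') takes the
// FP-cast path instead.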
2893
2894/// Returns the attribute (either parameter attribute, or function
2895/// attribute), which declares argument ArgNo to be non-null.
2896static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2897 QualType ArgType, unsigned ArgNo) {
2898 // FIXME: __attribute__((nonnull)) can also be applied to:
2899 // - references to pointers, where the pointee is known to be
2900 // nonnull (apparently a Clang extension)
2901 // - transparent unions containing pointers
2902 // In the former case, LLVM IR cannot represent the constraint. In
2903 // the latter case, we have no guarantee that the transparent union
2904 // is in fact passed as a pointer.
2905 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2906 return nullptr;
2907 // First, check attribute on parameter itself.
2908 if (PVD) {
2909 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2910 return ParmNNAttr;
2911 }
2912 // Check function attributes.
2913 if (!FD)
2914 return nullptr;
2915 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2916 if (NNAttr->isNonNull(ArgNo))
2917 return NNAttr;
2918 }
2919 return nullptr;
2920}
2921
2922namespace {
2923 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2924 Address Temp;
2925 Address Arg;
2926 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2927 void Emit(CodeGenFunction &CGF, Flags flags) override {
2928 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2929 CGF.Builder.CreateStore(errorValue, Arg);
2930 }
2931 };
2932}
2933
2934 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2935 llvm::Function *Fn,
2936 const FunctionArgList &Args) {
2937 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2938 // Naked functions don't have prologues.
2939 return;
2940
2941 // If this is an implicit-return-zero function, go ahead and
2942 // initialize the return value. TODO: it might be nice to have
2943 // a more general mechanism for this that didn't require synthesized
2944 // return statements.
2945 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2946 if (FD->hasImplicitReturnZero()) {
2947 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2948 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2949 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2950 Builder.CreateStore(Zero, ReturnValue);
2951 }
2952 }
2953
2954 // FIXME: We no longer need the types from FunctionArgList; lift up and
2955 // simplify.
2956
2957 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2958 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2959
2960 // If we're using inalloca, all the memory arguments are GEPs off of the last
2961 // parameter, which is a pointer to the complete memory area.
2962 Address ArgStruct = Address::invalid();
2963 if (IRFunctionArgs.hasInallocaArg())
2964 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2965 FI.getArgStruct(), FI.getArgStructAlignment());
2966
2967 // Name the struct return parameter.
2968 if (IRFunctionArgs.hasSRetArg()) {
2969 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2970 AI->setName("agg.result");
2971 AI->addAttr(llvm::Attribute::NoAlias);
2972 }
2973
2974 // Track if we received the parameter as a pointer (indirect, byval, or
2975 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
2976 // it into a local alloca for us.
2976 // into a local alloca for us.
2977 SmallVector<ParamValue, 16> ArgVals;
2978 ArgVals.reserve(Args.size());
2979
2980 // Create a pointer value for every parameter declaration. This usually
2981 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2982 // any cleanups or do anything that might unwind. We do that separately, so
2983 // we can push the cleanups in the correct order for the ABI.
2984 assert(FI.arg_size() == Args.size() &&
2985 "Mismatch between function signature & arguments.");
2986 unsigned ArgNo = 0;
2987 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2988 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2989 i != e; ++i, ++info_it, ++ArgNo) {
2990 const VarDecl *Arg = *i;
2991 const ABIArgInfo &ArgI = info_it->info;
2992
2993 bool isPromoted =
2994 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2995 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2996 // the parameter is promoted. In this case we convert to
2997 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2998 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2999 assert(hasScalarEvaluationKind(Ty) ==
3000 hasScalarEvaluationKind(Arg->getType()));
3001
3002 unsigned FirstIRArg, NumIRArgs;
3003 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3004
3005 switch (ArgI.getKind()) {
3006 case ABIArgInfo::InAlloca: {
3007 assert(NumIRArgs == 0);
3008 auto FieldIndex = ArgI.getInAllocaFieldIndex();
3009 Address V =
3010 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
3011 if (ArgI.getInAllocaIndirect())
3012 V = Address(Builder.CreateLoad(V), ConvertTypeForMem(Ty),
3013 getContext().getTypeAlignInChars(Ty));
3014 ArgVals.push_back(ParamValue::forIndirect(V));
3015 break;
3016 }
3017
3018 case ABIArgInfo::Indirect:
3019 case ABIArgInfo::IndirectAliased: {
3020 assert(NumIRArgs == 1);
3021 Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
3022 ArgI.getIndirectAlign(), KnownNonNull);
3023
3024 if (!hasScalarEvaluationKind(Ty)) {
3025 // Aggregates and complex variables are accessed by reference. All we
3026 // need to do is realign the value, if requested. Also, if the address
3027 // may be aliased, copy it to ensure that the parameter variable is
3028 // mutable and has a unique address, as C requires.
3029 Address V = ParamAddr;
3030 if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
3031 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
3032
3033 // Copy from the incoming argument pointer to the temporary with the
3034 // appropriate alignment.
3035 //
3036 // FIXME: We should have a common utility for generating an aggregate
3037 // copy.
3038 CharUnits Size = getContext().getTypeSizeInChars(Ty);
3039 Builder.CreateMemCpy(
3040 AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
3041 ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
3042 llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
3043 V = AlignedTemp;
3044 }
3045 ArgVals.push_back(ParamValue::forIndirect(V));
3046 } else {
3047 // Load scalar value from indirect argument.
3048 llvm::Value *V =
3049 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
3050
3051 if (isPromoted)
3052 V = emitArgumentDemotion(*this, Arg, V);
3053 ArgVals.push_back(ParamValue::forDirect(V));
3054 }
3055 break;
3056 }
3057
3058 case ABIArgInfo::Extend:
3059 case ABIArgInfo::Direct: {
3060 auto AI = Fn->getArg(FirstIRArg);
3061 llvm::Type *LTy = ConvertType(Arg->getType());
3062
3063 // Prepare parameter attributes. So far, only attributes for pointer
3064 // parameters are prepared. See
3065 // http://llvm.org/docs/LangRef.html#paramattrs.
3066 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
3067 ArgI.getCoerceToType()->isPointerTy()) {
3068 assert(NumIRArgs == 1);
3069
3070 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
3071 // Set `nonnull` attribute if any.
3072 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
3073 PVD->getFunctionScopeIndex()) &&
3074 !CGM.getCodeGenOpts().NullPointerIsValid)
3075 AI->addAttr(llvm::Attribute::NonNull);
3076
3077 QualType OTy = PVD->getOriginalType();
3078 if (const auto *ArrTy =
3079 getContext().getAsConstantArrayType(OTy)) {
3080 // A C99 array parameter declaration with the static keyword also
3081 // indicates dereferenceability, and if the size is constant we can
3082 // use the dereferenceable attribute (which requires the size in
3083 // bytes).
3084 if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) {
3085 QualType ETy = ArrTy->getElementType();
3086 llvm::Align Alignment =
3087 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3088 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
3089 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
3090 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
3091 ArrSize) {
3092 llvm::AttrBuilder Attrs(getLLVMContext());
3093 Attrs.addDereferenceableAttr(
3094 getContext().getTypeSizeInChars(ETy).getQuantity() *
3095 ArrSize);
3096 AI->addAttrs(Attrs);
3097 } else if (getContext().getTargetInfo().getNullPointerValue(
3098 ETy.getAddressSpace()) == 0 &&
3099 !CGM.getCodeGenOpts().NullPointerIsValid) {
3100 AI->addAttr(llvm::Attribute::NonNull);
3101 }
3102 }
3103 } else if (const auto *ArrTy =
3104 getContext().getAsVariableArrayType(OTy)) {
3105 // For C99 VLAs with the static keyword, we don't know the size so
3106 // we can't use the dereferenceable attribute, but in addrspace(0)
3107 // we know that it must be nonnull.
3108 if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) {
3109 QualType ETy = ArrTy->getElementType();
3110 llvm::Align Alignment =
3111 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
3112 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
3113 if (!getTypes().getTargetAddressSpace(ETy) &&
3114 !CGM.getCodeGenOpts().NullPointerIsValid)
3115 AI->addAttr(llvm::Attribute::NonNull);
3116 }
3117 }
3118
3119 // Set `align` attribute if any.
3120 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3121 if (!AVAttr)
3122 if (const auto *TOTy = OTy->getAs<TypedefType>())
3123 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3124 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
3125 // If alignment-assumption sanitizer is enabled, we do *not* add
3126 // alignment attribute here, but emit normal alignment assumption,
3127 // so the UBSAN check could function.
3128 llvm::ConstantInt *AlignmentCI =
3129 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
3130 uint64_t AlignmentInt =
3131 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3132 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3133 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3134 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
3135 llvm::Align(AlignmentInt)));
3136 }
3137 }
3138 }
3139
3140 // Set 'noalias' if an argument type has the `restrict` qualifier.
3141 if (Arg->getType().isRestrictQualified())
3142 AI->addAttr(llvm::Attribute::NoAlias);
3143 }
3144
3145 // Prepare the argument value. If we have the trivial case, handle it
3146 // with no muss and fuss.
3147 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
3148 ArgI.getCoerceToType() == ConvertType(Ty) &&
3149 ArgI.getDirectOffset() == 0) {
3150 assert(NumIRArgs == 1);
3151
3152 // LLVM expects swifterror parameters to be used in very restricted
3153 // ways. Copy the value into a less-restricted temporary.
3154 llvm::Value *V = AI;
3155 if (FI.getExtParameterInfo(ArgNo).getABI()
3156 == ParameterABI::SwiftErrorResult) {
3157 QualType pointeeTy = Ty->getPointeeType();
3158 assert(pointeeTy->isPointerType());
3159 Address temp =
3160 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3161 Address arg(V, ConvertTypeForMem(pointeeTy),
3162 getContext().getTypeAlignInChars(pointeeTy));
3163 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
3164 Builder.CreateStore(incomingErrorValue, temp);
3165 V = temp.getPointer();
3166
3167 // Push a cleanup to copy the value back at the end of the function.
3168 // The convention does not guarantee that the value will be written
3169 // back if the function exits with an unwind exception.
3170 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
3171 }
3172
3173 // Ensure the argument is the correct type.
3174 if (V->getType() != ArgI.getCoerceToType())
3175 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
3176
3177 if (isPromoted)
3178 V = emitArgumentDemotion(*this, Arg, V);
3179
3180 // Because of merging of function types from multiple decls it is
3181 // possible for the type of an argument to not match the corresponding
3182 // type in the function type. Since we are codegening the callee
3183 // in here, add a cast to the argument type.
3184 llvm::Type *LTy = ConvertType(Arg->getType());
3185 if (V->getType() != LTy)
3186 V = Builder.CreateBitCast(V, LTy);
3187
3188 ArgVals.push_back(ParamValue::forDirect(V));
3189 break;
3190 }
3191
3192 // VLST arguments are coerced to VLATs at the function boundary for
3193 // ABI consistency. If this is a VLST that was coerced to
3194 // a VLAT at the function boundary and the types match up, use
3195 // llvm.vector.extract to convert back to the original VLST.
3196 if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
3197 llvm::Value *Coerced = Fn->getArg(FirstIRArg);
3198 if (auto *VecTyFrom =
3199 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
3200 // If we are casting a scalable i1 predicate vector to a fixed i8
3201 // vector, bitcast the source and use a vector extract.
3202 if (VecTyFrom->getElementType()->isIntegerTy(1) &&
3203 VecTyFrom->getElementCount().isKnownMultipleOf(8) &&
3204 VecTyTo->getElementType() == Builder.getInt8Ty()) {
3205 VecTyFrom = llvm::ScalableVectorType::get(
3206 VecTyTo->getElementType(),
3207 VecTyFrom->getElementCount().getKnownMinValue() / 8);
3208 Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
3209 }
3210 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
3211 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
3212
3213 assert(NumIRArgs == 1);
3214 Coerced->setName(Arg->getName() + ".coerce");
3215 ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
3216 VecTyTo, Coerced, Zero, "cast.fixed")));
3217 break;
3218 }
3219 }
3220 }
3221
3222 llvm::StructType *STy =
3223 dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
3224 if (ArgI.isDirect() && !ArgI.getCanBeFlattened() && STy &&
3225 STy->getNumElements() > 1) {
3226 [[maybe_unused]] llvm::TypeSize StructSize =
3227 CGM.getDataLayout().getTypeAllocSize(STy);
3228 [[maybe_unused]] llvm::TypeSize PtrElementSize =
3229 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(Ty));
3230 if (STy->containsHomogeneousScalableVectorTypes()) {
3231 assert(StructSize == PtrElementSize &&
3232 "Only allow non-fractional movement of structure with"
3233 "homogeneous scalable vector type");
3234
3235 ArgVals.push_back(ParamValue::forDirect(AI));
3236 break;
3237 }
3238 }
3239
3240 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
3241 Arg->getName());
3242
3243 // Pointer to store into.
3244 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
3245
3246 // Fast-isel and the optimizer generally like scalar values better than
3247 // FCAs (first-class aggregates), so we flatten them when it is safe to do so for this argument.
3248 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
3249 STy->getNumElements() > 1) {
3250 llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
3251 llvm::TypeSize PtrElementSize =
3252 CGM.getDataLayout().getTypeAllocSize(Ptr.getElementType());
3253 if (StructSize.isScalable()) {
3254 assert(STy->containsHomogeneousScalableVectorTypes() &&
3255 "ABI only supports structure with homogeneous scalable vector "
3256 "type");
3257 assert(StructSize == PtrElementSize &&
3258 "Only allow non-fractional movement of structure with"
3259 "homogeneous scalable vector type");
3260 assert(STy->getNumElements() == NumIRArgs);
3261
3262 llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
3263 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3264 auto *AI = Fn->getArg(FirstIRArg + i);
3265 AI->setName(Arg->getName() + ".coerce" + Twine(i));
3266 LoadedStructValue =
3267 Builder.CreateInsertValue(LoadedStructValue, AI, i);
3268 }
3269
3270 Builder.CreateStore(LoadedStructValue, Ptr);
3271 } else {
3272 uint64_t SrcSize = StructSize.getFixedValue();
3273 uint64_t DstSize = PtrElementSize.getFixedValue();
3274
3275 Address AddrToStoreInto = Address::invalid();
3276 if (SrcSize <= DstSize) {
3277 AddrToStoreInto = Ptr.withElementType(STy);
3278 } else {
3279 AddrToStoreInto =
3280 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
3281 }
3282
3283 assert(STy->getNumElements() == NumIRArgs);
3284 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3285 auto AI = Fn->getArg(FirstIRArg + i);
3286 AI->setName(Arg->getName() + ".coerce" + Twine(i));
3287 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
3288 Builder.CreateStore(AI, EltPtr);
3289 }
3290
3291 if (SrcSize > DstSize) {
3292 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
3293 }
3294 }
3295 } else {
3296 // Simple case, just do a coerced store of the argument into the alloca.
3297 assert(NumIRArgs == 1);
3298 auto AI = Fn->getArg(FirstIRArg);
3299 AI->setName(Arg->getName() + ".coerce");
3300 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
3301 }
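// [Editor's note] Sketch of the flattened case above for a parameter 'a' of
// type 'struct { double x, y; }' coerced to { double, double } (as on
// x86-64 SysV; the exact coercion is ABI-dependent):
//
//   store double %a.coerce0, ptr %a          ; element 0
//   %1 = getelementptr { double, double }, ptr %a, i32 0, i32 1
//   store double %a.coerce1, ptr %1          ; element 1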
3302
3303 // Match to what EmitParmDecl is expecting for this type.
3304 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
3305 llvm::Value *V =
3306 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
3307 if (isPromoted)
3308 V = emitArgumentDemotion(*this, Arg, V);
3309 ArgVals.push_back(ParamValue::forDirect(V));
3310 } else {
3311 ArgVals.push_back(ParamValue::forIndirect(Alloca));
3312 }
3313 break;
3314 }
3315
3316 case ABIArgInfo::CoerceAndExpand: {
3317 // Reconstruct into a temporary.
3318 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
3319 ArgVals.push_back(ParamValue::forIndirect(alloca));
3320
3321 auto coercionType = ArgI.getCoerceAndExpandType();
3322 alloca = alloca.withElementType(coercionType);
3323
3324 unsigned argIndex = FirstIRArg;
3325 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3326 llvm::Type *eltType = coercionType->getElementType(i);
3327 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
3328 continue;
3329
3330 auto eltAddr = Builder.CreateStructGEP(alloca, i);
3331 auto elt = Fn->getArg(argIndex++);
3332 Builder.CreateStore(elt, eltAddr);
3333 }
3334 assert(argIndex == FirstIRArg + NumIRArgs);
3335 break;
3336 }
3337
3338 case ABIArgInfo::Expand: {
3339 // If this structure was expanded into multiple arguments then
3340 // we need to create a temporary and reconstruct it from the
3341 // arguments.
3342 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
3343 LValue LV = MakeAddrLValue(Alloca, Ty);
3344 ArgVals.push_back(ParamValue::forIndirect(Alloca));
3345
3346 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3347 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3348 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3349 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3350 auto AI = Fn->getArg(FirstIRArg + i);
3351 AI->setName(Arg->getName() + "." + Twine(i));
3352 }
3353 break;
3354 }
3355
3356 case ABIArgInfo::Ignore:
3357 assert(NumIRArgs == 0);
3358 // Initialize the local variable appropriately.
3359 if (!hasScalarEvaluationKind(Ty)) {
3360 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
3361 } else {
3362 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
3363 ArgVals.push_back(ParamValue::forDirect(U));
3364 }
3365 break;
3366 }
3367 }
3368
3369 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3370 for (int I = Args.size() - 1; I >= 0; --I)
3371 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3372 } else {
3373 for (unsigned I = 0, E = Args.size(); I != E; ++I)
3374 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3375 }
3376}
3377
3378static void eraseUnusedBitCasts(llvm::Instruction *insn) {
3379 while (insn->use_empty()) {
3380 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3381 if (!bitcast) return;
3382
3383 // This is "safe" because we would have used a ConstantExpr otherwise.
3384 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3385 bitcast->eraseFromParent();
3386 }
3387}
3388
3389 /// Try to emit a fused autorelease of a return result.
3390 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
3391 llvm::Value *result) {
3392 // The result must be the last instruction before the insertion point.
3393 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3394 if (BB->empty()) return nullptr;
3395 if (&BB->back() != result) return nullptr;
3396
3397 llvm::Type *resultType = result->getType();
3398
3399 // result is in a BasicBlock and is therefore an Instruction.
3400 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3401
3402 SmallVector<llvm::Instruction *, 4> InstsToKill;
3403
3404 // Look for:
3405 // %generator = bitcast %type1* %generator2 to %type2*
3406 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3407 // We would have emitted this as a constant if the operand weren't
3408 // an Instruction.
3409 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3410
3411 // Require the generator to be immediately followed by the cast.
3412 if (generator->getNextNode() != bitcast)
3413 return nullptr;
3414
3415 InstsToKill.push_back(bitcast);
3416 }
3417
3418 // Look for:
3419 // %generator = call i8* @objc_retain(i8* %originalResult)
3420 // or
3421 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
3422 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3423 if (!call) return nullptr;
3424
3425 bool doRetainAutorelease;
3426
3427 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3428 doRetainAutorelease = true;
3429 } else if (call->getCalledOperand() ==
3430 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
3431 doRetainAutorelease = false;
3432
3433 // If we emitted an assembly marker for this call (and the
3434 // ARCEntrypoints field should have been set if so), go looking
3435 // for that call. If we can't find it, we can't do this
3436 // optimization. But it should always be the immediately previous
3437 // instruction, unless we needed bitcasts around the call.
3438 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
3439 llvm::Instruction *prev = call->getPrevNode();
3440 assert(prev);
3441 if (isa<llvm::BitCastInst>(prev)) {
3442 prev = prev->getPrevNode();
3443 assert(prev);
3444 }
3445 assert(isa<llvm::CallInst>(prev));
3446 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3447 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
3448 InstsToKill.push_back(prev);
3449 }
3450 } else {
3451 return nullptr;
3452 }
3453
3454 result = call->getArgOperand(0);
3455 InstsToKill.push_back(call);
3456
3457 // Keep killing bitcasts, for sanity. Note that we no longer care
3458 // about precise ordering as long as there's exactly one use.
3459 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3460 if (!bitcast->hasOneUse()) break;
3461 InstsToKill.push_back(bitcast);
3462 result = bitcast->getOperand(0);
3463 }
3464
3465 // Delete all the unnecessary instructions, from latest to earliest.
3466 for (auto *I : InstsToKill)
3467 I->eraseFromParent();
3468
3469 // Do the fused retain/autorelease if we were asked to.
3470 if (doRetainAutorelease)
3471 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
3472
3473 // Cast back to the result type.
3474 return CGF.Builder.CreateBitCast(result, resultType);
3475}
3476
3477 /// If this is a +1 of the value of an immutable 'self', remove it.
3478 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
3479 llvm::Value *result) {
3480 // This is only applicable to a method with an immutable 'self'.
3481 const ObjCMethodDecl *method =
3482 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
3483 if (!method) return nullptr;
3484 const VarDecl *self = method->getSelfDecl();
3485 if (!self->getType().isConstQualified()) return nullptr;
3486
3487 // Look for a retain call. Note: stripPointerCasts looks through returned arg
3488 // functions, which would cause us to miss the retain.
3489 llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
3490 if (!retainCall || retainCall->getCalledOperand() !=
3491 CGF.CGM.getObjCEntrypoints().objc_retain)
3492 return nullptr;
3493
3494 // Look for an ordinary load of 'self'.
3495 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3496 llvm::LoadInst *load =
3497 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3498 if (!load || load->isAtomic() || load->isVolatile() ||
3499 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
3500 return nullptr;
3501
3502 // Okay! Burn it all down. This relies for correctness on the
3503 // assumption that the retain is emitted as part of the return and
3504 // that thereafter everything is used "linearly".
3505 llvm::Type *resultType = result->getType();
3506 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
3507 assert(retainCall->use_empty());
3508 retainCall->eraseFromParent();
3509 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
3510
3511 return CGF.Builder.CreateBitCast(load, resultType);
3512}
3513
3514/// Emit an ARC autorelease of the result of a function.
3515///
3516 /// \return the value to actually return from the function
3517 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
3518 llvm::Value *result) {
3519 // If we're returning 'self', kill the initial retain. This is a
3520 // heuristic attempt to "encourage correctness" in the really unfortunate
3521 // case where we have a return of self during a dealloc and we desperately
3522 // need to avoid the possible autorelease.
3523 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3524 return self;
3525
3526 // At -O0, try to emit a fused retain/autorelease.
3527 if (CGF.shouldUseFusedARCCalls())
3528 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3529 return fused;
3530
3531 return CGF.EmitARCAutoreleaseReturnValue(result);
3532}
3533
3534 /// Heuristically search for a dominating store to the return-value slot.
3535 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3536 // Check whether a User is a store whose pointer operand is the ReturnValue.
3537 // We are looking for stores to the ReturnValue, not for stores of the
3538 // ReturnValue to some other location.
3539 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3540 auto *SI = dyn_cast<llvm::StoreInst>(U);
3541 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
3542 SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
3543 return nullptr;
3544 // These aren't actually possible for non-coerced returns, and we
3545 // only care about non-coerced returns on this code path.
3546 // All memory instructions inside a __try block are volatile.
3547 assert(!SI->isAtomic() &&
3548 (!SI->isVolatile() || CGF.currentFunctionUsesSEHTry()));
3549 return SI;
3550 };
3551 // If there are multiple uses of the return-value slot, just check
3552 // for something immediately preceding the IP. Sometimes this can
3553 // happen with how we generate implicit-returns; it can also happen
3554 // with noreturn cleanups.
3555 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3556 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3557 if (IP->empty()) return nullptr;
3558
3559 // Look at directly preceding instruction, skipping bitcasts and lifetime
3560 // markers.
3561 for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
3562 if (isa<llvm::BitCastInst>(&I))
3563 continue;
3564 if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
3565 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3566 continue;
3567
3568 return GetStoreIfValid(&I);
3569 }
3570 return nullptr;
3571 }
3572
3573 llvm::StoreInst *store =
3574 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3575 if (!store) return nullptr;
3576
3577 // Now do a quick-and-dirty dominance check: just walk up the
3578 // single-predecessors chain from the current insertion point.
3579 llvm::BasicBlock *StoreBB = store->getParent();
3580 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3581 llvm::SmallPtrSet<llvm::BasicBlock *, 4> SeenBBs;
3582 while (IP != StoreBB) {
3583 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3584 return nullptr;
3585 }
3586
3587 // Okay, the store's basic block dominates the insertion point; we
3588 // can do our thing.
3589 return store;
3590}
3591
3592// Helper functions for EmitCMSEClearRecord
3593
3594// Set the bits corresponding to a field having width `BitWidth` and located at
3595// offset `BitOffset` (from the least significant bit) within a storage unit of
3596// `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3597// Use little-endian layout, i.e.`Bits[0]` is the LSB.
3598static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
3599 int BitWidth, int CharWidth) {
3600 assert(CharWidth <= 64);
3601 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3602
3603 int Pos = 0;
3604 if (BitOffset >= CharWidth) {
3605 Pos += BitOffset / CharWidth;
3606 BitOffset = BitOffset % CharWidth;
3607 }
3608
3609 const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3610 if (BitOffset + BitWidth >= CharWidth) {
3611 Bits[Pos++] |= (Used << BitOffset) & Used;
3612 BitWidth -= CharWidth - BitOffset;
3613 BitOffset = 0;
3614 }
3615
3616 while (BitWidth >= CharWidth) {
3617 Bits[Pos++] = Used;
3618 BitWidth -= CharWidth;
3619 }
3620
3621 if (BitWidth > 0)
3622 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3623}
3624
3625// Set the bits corresponding to a field having width `BitWidth` and located at
3626// offset `BitOffset` (from the least significant bit) within a storage unit of
3627// `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3628// `Bits` corresponds to one target byte. Use target endian layout.
3629static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3630 int StorageSize, int BitOffset, int BitWidth,
3631 int CharWidth, bool BigEndian) {
3632
3633 SmallVector<uint64_t, 8> TmpBits(StorageSize);
3634 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3635
3636 if (BigEndian)
3637 std::reverse(TmpBits.begin(), TmpBits.end());
3638
3639 for (uint64_t V : TmpBits)
3640 Bits[StorageOffset++] |= V;
3641}
3642
3643 static void setUsedBits(CodeGenModule &, QualType, int,
3644 SmallVectorImpl<uint64_t> &);
3645
3646// Set the bits in `Bits`, which correspond to the value representations of
3647// the actual members of the record type `RTy`. Note that this function does
3648// not handle base classes, virtual tables, etc, since they cannot happen in
3649// CMSE function arguments or return. The bit mask corresponds to the target
3650// memory layout, i.e. it's endian dependent.
3651 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3652 SmallVectorImpl<uint64_t> &Bits) {
3653 ASTContext &Context = CGM.getContext();
3654 int CharWidth = Context.getCharWidth();
3655 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3656 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3657 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3658
3659 int Idx = 0;
3660 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3661 const FieldDecl *F = *I;
3662
3663 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3664 F->getType()->isIncompleteArrayType())
3665 continue;
3666
3667 if (F->isBitField()) {
3668 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3669 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3670 BFI.StorageSize / CharWidth, BFI.Offset,
3671 BFI.Size, CharWidth,
3672 CGM.getDataLayout().isBigEndian());
3673 continue;
3674 }
3675
3676 setUsedBits(CGM, F->getType(),
3677 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3678 }
3679}
3680
3681// Set the bits in `Bits`, which correspond to the value representations of
3682// the elements of an array type `ATy`.
3683static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3684 int Offset, SmallVectorImpl<uint64_t> &Bits) {
3685 const ASTContext &Context = CGM.getContext();
3686
3687 QualType ETy = Context.getBaseElementType(ATy);
3688 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3689 SmallVector<uint64_t, 4> TmpBits(Size);
3690 setUsedBits(CGM, ETy, 0, TmpBits);
3691
3692 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3693 auto Src = TmpBits.begin();
3694 auto Dst = Bits.begin() + Offset + I * Size;
3695 for (int J = 0; J < Size; ++J)
3696 *Dst++ |= *Src++;
3697 }
3698}
3699
3700// Set the bits in `Bits`, which correspond to the value representations of
3701// the type `QTy`.
3702 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3703 SmallVectorImpl<uint64_t> &Bits) {
3704 if (const auto *RTy = QTy->getAs<RecordType>())
3705 return setUsedBits(CGM, RTy, Offset, Bits);
3706
3707 ASTContext &Context = CGM.getContext();
3708 if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3709 return setUsedBits(CGM, ATy, Offset, Bits);
3710
3711 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3712 if (Size <= 0)
3713 return;
3714
3715 std::fill_n(Bits.begin() + Offset, Size,
3716 (uint64_t(1) << Context.getCharWidth()) - 1);
3717}
3718
3719 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3720 int Pos, int Size, int CharWidth,
3721 bool BigEndian) {
3722 assert(Size > 0);
3723 uint64_t Mask = 0;
3724 if (BigEndian) {
3725 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3726 ++P)
3727 Mask = (Mask << CharWidth) | *P;
3728 } else {
3729 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3730 do
3731 Mask = (Mask << CharWidth) | *--P;
3732 while (P != End);
3733 }
3734 return Mask;
3735}
3736
3737// Emit code to clear the bits in a record, which aren't a part of any user
3738// declared member, when the record is a function return.
3739llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3740 llvm::IntegerType *ITy,
3741 QualType QTy) {
3742 assert(Src->getType() == ITy);
3743 assert(ITy->getScalarSizeInBits() <= 64);
3744
3745 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3746 int Size = DataLayout.getTypeStoreSize(ITy);
3747 SmallVector<uint64_t, 4> Bits(Size);
3748 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3749
3750 int CharWidth = CGM.getContext().getCharWidth();
3751 uint64_t Mask =
3752 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3753
3754 return Builder.CreateAnd(Src, Mask, "cmse.clear");
3755}
3756
3757// Emit code to clear the bits in a record, which aren't a part of any user
3758// declared member, when the record is a function argument.
3759llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3760 llvm::ArrayType *ATy,
3761 QualType QTy) {
3762 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3763 int Size = DataLayout.getTypeStoreSize(ATy);
3764 SmallVector<uint64_t, 16> Bits(Size);
3765 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3766
3767 // Clear each element of the LLVM array.
3768 int CharWidth = CGM.getContext().getCharWidth();
3769 int CharsPerElt =
3770 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3771 int MaskIndex = 0;
3772 llvm::Value *R = llvm::PoisonValue::get(ATy);
3773 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3774 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3775 DataLayout.isBigEndian());
3776 MaskIndex += CharsPerElt;
3777 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3778 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3779 R = Builder.CreateInsertValue(R, T1, I);
3780 }
3781
3782 return R;
3783}
3784
3785 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3786 bool EmitRetDbgLoc,
3787 SourceLocation EndLoc) {
3788 if (FI.isNoReturn()) {
3789 // Noreturn functions don't return.
3790 EmitUnreachable(EndLoc);
3791 return;
3792 }
3793
3794 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3795 // Naked functions don't have epilogues.
3796 Builder.CreateUnreachable();
3797 return;
3798 }
3799
3800 // Functions with no result always return void.
3801 if (!ReturnValue.isValid()) {
3802 Builder.CreateRetVoid();
3803 return;
3804 }
3805
3806 llvm::DebugLoc RetDbgLoc;
3807 llvm::Value *RV = nullptr;
3808 QualType RetTy = FI.getReturnType();
3809 const ABIArgInfo &RetAI = FI.getReturnInfo();
3810
3811 switch (RetAI.getKind()) {
3812 case ABIArgInfo::InAlloca:
3813 // Aggregates get evaluated directly into the destination. Sometimes we
3814 // need to return the sret value in a register, though.
3815 assert(hasAggregateEvaluationKind(RetTy));
3816 if (RetAI.getInAllocaSRet()) {
3817 llvm::Function::arg_iterator EI = CurFn->arg_end();
3818 --EI;
3819 llvm::Value *ArgStruct = &*EI;
3820 llvm::Value *SRet = Builder.CreateStructGEP(
3821 FI.getArgStruct(), ArgStruct, RetAI.getInAllocaFieldIndex());
3822 llvm::Type *Ty =
3823 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3824 RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
3825 }
3826 break;
3827
3828 case ABIArgInfo::Indirect: {
3829 auto AI = CurFn->arg_begin();
3830 if (RetAI.isSRetAfterThis())
3831 ++AI;
3832 switch (getEvaluationKind(RetTy)) {
3833 case TEK_Complex: {
3834 ComplexPairTy RT =
3835 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3836 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3837 /*isInit*/ true);
3838 break;
3839 }
3840 case TEK_Aggregate:
3841 // Do nothing; aggregates get evaluated directly into the destination.
3842 break;
3843 case TEK_Scalar: {
3844 LValueBaseInfo BaseInfo;
3845 TBAAAccessInfo TBAAInfo;
3846 CharUnits Alignment =
3847 CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
3848 Address ArgAddr(&*AI, ConvertType(RetTy), Alignment);
3849 LValue ArgVal =
3850 LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
3851 EmitStoreOfScalar(
3852 Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
3853 break;
3854 }
3855 }
3856 break;
3857 }
3858
3859 case ABIArgInfo::Extend:
3860 case ABIArgInfo::Direct:
3861 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3862 RetAI.getDirectOffset() == 0) {
3863 // The internal return value temp will always have pointer-to-return-type
3864 // type, so just do a load.
3865
3866 // If there is a dominating store to ReturnValue, we can elide
3867 // the load, zap the store, and usually zap the alloca.
3868 if (llvm::StoreInst *SI =
3869 findDominatingStoreToReturnValue(*this)) {
3870 // Reuse the debug location from the store unless there is
3871 // cleanup code to be emitted between the store and return
3872 // instruction.
3873 if (EmitRetDbgLoc && !AutoreleaseResult)
3874 RetDbgLoc = SI->getDebugLoc();
3875 // Get the stored value and nuke the now-dead store.
3876 RV = SI->getValueOperand();
3877 SI->eraseFromParent();
3878
3879 // Otherwise, we have to do a simple load.
3880 } else {
3881 RV = Builder.CreateLoad(ReturnValue);
3882 }
3883 } else {
3884 // If the value is offset in memory, apply the offset now.
3885 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3886
3887 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3888 }
3889
3890 // In ARC, end functions that return a retainable type with a call
3891 // to objc_autoreleaseReturnValue.
3892 if (AutoreleaseResult) {
3893#ifndef NDEBUG
3894 // Type::isObjCRetainableType has to be called on a QualType that hasn't
3895 // been stripped of the typedefs, so we cannot use RetTy here. Get the
3896 // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
3897 // from CurCodeDecl or BlockInfo.
3898 QualType RT;
3899
3900 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3901 RT = FD->getReturnType();
3902 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3903 RT = MD->getReturnType();
3904 else if (isa<BlockDecl>(CurCodeDecl))
3905 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3906 else
3907 llvm_unreachable("Unexpected function/method type");
3908
3909 assert(getLangOpts().ObjCAutoRefCount &&
3910 !FI.isReturnsRetained() &&
3911 RT->isObjCRetainableType());
3912#endif
3913 RV = emitAutoreleaseOfResult(*this, RV);
3914 }
3915
3916 break;
3917
3918 case ABIArgInfo::Ignore:
3919 break;
3920
3921 case ABIArgInfo::CoerceAndExpand: {
3922 auto coercionType = RetAI.getCoerceAndExpandType();
3923
3924 // Load all of the coerced elements out into results.
3925 llvm::SmallVector<llvm::Value *, 4> results;
3926 Address addr = ReturnValue.withElementType(coercionType);
3927 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3928 auto coercedEltType = coercionType->getElementType(i);
3929 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3930 continue;
3931
3932 auto eltAddr = Builder.CreateStructGEP(addr, i);
3933 auto elt = Builder.CreateLoad(eltAddr);
3934 results.push_back(elt);
3935 }
3936
3937 // If we have one result, it's the single direct result type.
3938 if (results.size() == 1) {
3939 RV = results[0];
3940
3941 // Otherwise, we need to make a first-class aggregate.
3942 } else {
3943 // Construct a return type that lacks padding elements.
3944 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3945
3946 RV = llvm::PoisonValue::get(returnType);
3947 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3948 RV = Builder.CreateInsertValue(RV, results[i], i);
3949 }
3950 }
3951 break;
3952 }
3953 case ABIArgInfo::Expand:
3954 case ABIArgInfo::IndirectAliased:
3955 llvm_unreachable("Invalid ABI kind for return argument");
3956 }
3957
3958 llvm::Instruction *Ret;
3959 if (RV) {
3960 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3961 // For certain return types, clear padding bits, as they may reveal
3962 // sensitive information.
3963 // Small struct/union types are passed as integers.
3964 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3965 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3966 RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3967 }
3968 EmitReturnValueCheck(RV);
3969 Ret = Builder.CreateRet(RV);
3970 } else {
3971 Ret = Builder.CreateRetVoid();
3972 }
3973
3974 if (RetDbgLoc)
3975 Ret->setDebugLoc(std::move(RetDbgLoc));
3976}
3977
3978void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3979 // A current decl may not be available when emitting vtable thunks.
3980 if (!CurCodeDecl)
3981 return;
3982
3983 // If the return block isn't reachable, neither is this check, so don't emit
3984 // it.
3985 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3986 return;
3987
3988 ReturnsNonNullAttr *RetNNAttr = nullptr;
3989 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3990 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3991
3992 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3993 return;
3994
3995 // Prefer the returns_nonnull attribute if it's present.
3996 SourceLocation AttrLoc;
3997 SanitizerMask CheckKind;
3998 SanitizerHandler Handler;
3999 if (RetNNAttr) {
4000 assert(!requiresReturnValueNullabilityCheck() &&
4001 "Cannot check nullability and the nonnull attribute");
4002 AttrLoc = RetNNAttr->getLocation();
4003 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
4004 Handler = SanitizerHandler::NonnullReturn;
4005 } else {
4006 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
4007 if (auto *TSI = DD->getTypeSourceInfo())
4008 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
4009 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
4010 CheckKind = SanitizerKind::NullabilityReturn;
4011 Handler = SanitizerHandler::NullabilityReturn;
4012 }
4013
4014 SanitizerScope SanScope(this);
4015
4016 // Make sure the "return" source location is valid. If we're checking a
4017 // nullability annotation, make sure the preconditions for the check are met.
4018 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
4019 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
4020 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
4021 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
4022 if (requiresReturnValueNullabilityCheck())
4023 CanNullCheck =
4024 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
4025 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
4026 EmitBlock(Check);
4027
4028 // Now do the null check.
4029 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
4030 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
4031 llvm::Value *DynamicData[] = {SLocPtr};
4032 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
4033
4034 EmitBlock(NoCheck);
4035
4036#ifndef NDEBUG
4037 // The return location should not be used after the check has been emitted.
4038 ReturnLocation = Address::invalid();
4039#endif
4040}
4041
4042 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
4043 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
4044 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
4045}
4046
4047 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
4048 QualType Ty) {
4049 // FIXME: Generate IR in one pass, rather than going back and fixing up these
4050 // placeholders.
4051 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
4052 llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
4053 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
4054
4055 // FIXME: When we generate this IR in one pass, we shouldn't need
4056 // this win32-specific alignment hack.
4057 CharUnits Align = CharUnits::fromQuantity(4);
4058 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
4059
4060 return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align),
4061 Ty.getQualifiers(),
4062 AggValueSlot::IsNotDestructed,
4063 AggValueSlot::DoesNotNeedGCBarriers,
4064 AggValueSlot::IsNotAliased,
4065 AggValueSlot::DoesNotOverlap);
4066}
4067
4068 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
4069 const VarDecl *param,
4070 SourceLocation loc) {
4071 // StartFunction converted the ABI-lowered parameter(s) into a
4072 // local alloca. We need to turn that into an r-value suitable
4073 // for EmitCall.
4074 Address local = GetAddrOfLocalVar(param);
4075
4076 QualType type = param->getType();
4077
4078 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
4079 // but the argument needs to be the original pointer.
4080 if (type->isReferenceType()) {
4081 args.add(RValue::get(Builder.CreateLoad(local)), type);
4082
4083 // In ARC, move out of consumed arguments so that the release cleanup
4084 // entered by StartFunction doesn't cause an over-release. This isn't
4085 // optimal -O0 code generation, but it should get cleaned up when
4086 // optimization is enabled. This also assumes that delegate calls are
4087 // performed exactly once for a set of arguments, but that should be safe.
4088 } else if (getLangOpts().ObjCAutoRefCount &&
4089 param->hasAttr<NSConsumedAttr>() &&
4090 type->isObjCRetainableType()) {
4091 llvm::Value *ptr = Builder.CreateLoad(local);
4092 auto null =
4093 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
4094 Builder.CreateStore(null, local);
4095 args.add(RValue::get(ptr), type);
4096
4097 // For the most part, we just need to load the alloca, except that
4098 // aggregate r-values are actually pointers to temporaries.
4099 } else {
4100 args.add(convertTempToRValue(local, type, loc), type);
4101 }
4102
4103 // Deactivate the cleanup for the callee-destructed param that was pushed.
4104 if (type->isRecordType() && !CurFuncIsThunk &&
4105 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
4106 param->needsDestruction(getContext())) {
4107 EHScopeStack::stable_iterator cleanup =
4108 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
4109 assert(cleanup.isValid() &&
4110 "cleanup for callee-destructed param not recorded");
4111 // This unreachable is a temporary marker which will be removed later.
4112 llvm::Instruction *isActive = Builder.CreateUnreachable();
4113 args.addArgCleanupDeactivation(cleanup, isActive);
4114 }
4115}
4116
4117static bool isProvablyNull(llvm::Value *addr) {
4118 return isa<llvm::ConstantPointerNull>(addr);
4119}
4120
4121 /// Emit the actual writing-back of a writeback.
4122 static void emitWriteback(CodeGenFunction &CGF,
4123 const CallArgList::Writeback &writeback) {
4124 const LValue &srcLV = writeback.Source;
4125 Address srcAddr = srcLV.getAddress(CGF);
4126 assert(!isProvablyNull(srcAddr.getPointer()) &&
4127 "shouldn't have writeback for provably null argument");
4128
4129 llvm::BasicBlock *contBB = nullptr;
4130
4131 // If the argument wasn't provably non-null, we need to null check
4132 // before doing the store.
4133 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
4134 CGF.CGM.getDataLayout());
4135 if (!provablyNonNull) {
4136 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
4137 contBB = CGF.createBasicBlock("icr.done");
4138
4139 llvm::Value *isNull =
4140 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
4141 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
4142 CGF.EmitBlock(writebackBB);
4143 }
4144
4145 // Load the value to writeback.
4146 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
4147
4148 // Cast it back, in case we're writing an id to a Foo* or something.
4149 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
4150 "icr.writeback-cast");
4151
4152 // Perform the writeback.
4153
4154 // If we have a "to use" value, it's something we need to emit a use
4155 // of. This has to be carefully threaded in: if it's done after the
4156 // release it's potentially undefined behavior (and the optimizer
4157 // will ignore it), and if it happens before the retain then the
4158 // optimizer could move the release there.
4159 if (writeback.ToUse) {
4160 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
4161
4162 // Retain the new value. No need to block-copy here: the block's
4163 // being passed up the stack.
4164 value = CGF.EmitARCRetainNonBlock(value);
4165
4166 // Emit the intrinsic use here.
4167 CGF.EmitARCIntrinsicUse(writeback.ToUse);
4168
4169 // Load the old value (primitively).
4170 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
4171
4172 // Put the new value in place (primitively).
4173 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
4174
4175 // Release the old value.
4176 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
4177
4178 // Otherwise, we can just do a normal lvalue store.
4179 } else {
4180 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
4181 }
4182
4183 // Jump to the continuation block.
4184 if (!provablyNonNull)
4185 CGF.EmitBlock(contBB);
4186}
4187
4188 static void emitWritebacks(CodeGenFunction &CGF,
4189 const CallArgList &args) {
4190 for (const auto &I : args.writebacks())
4191 emitWriteback(CGF, I);
4192}
4193
4194 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
4195 const CallArgList &CallArgs) {
4196 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
4197 CallArgs.getCleanupsToDeactivate();
4198 // Iterate in reverse to increase the likelihood of popping the cleanup.
4199 for (const auto &I : llvm::reverse(Cleanups)) {
4200 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
4201 I.IsActiveIP->eraseFromParent();
4202 }
4203}
4204
4205static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
4206 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
4207 if (uop->getOpcode() == UO_AddrOf)
4208 return uop->getSubExpr();
4209 return nullptr;
4210}
4211
4212/// Emit an argument that's being passed call-by-writeback. That is,
4213/// we are passing the address of an __autoreleased temporary; it
4214/// might be copy-initialized with the current value of the given
4215 /// address, but it will definitely be copied out of after the call.
4216 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
4217 const ObjCIndirectCopyRestoreExpr *CRE) {
4218 LValue srcLV;
4219
4220 // Make an optimistic effort to emit the address as an l-value.
4221 // This can fail if the argument expression is more complicated.
4222 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
4223 srcLV = CGF.EmitLValue(lvExpr);
4224
4225 // Otherwise, just emit it as a scalar.
4226 } else {
4227 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
4228
4229 QualType srcAddrType =
4230 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
4231 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
4232 }
4233 Address srcAddr = srcLV.getAddress(CGF);
4234
4235 // The dest and src types don't necessarily match in LLVM terms
4236 // because of the crazy ObjC compatibility rules.
4237
4238 llvm::PointerType *destType =
4239 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
4240 llvm::Type *destElemType =
4241 CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());
4242
4243 // If the address is a constant null, just pass the appropriate null.
4244 if (isProvablyNull(srcAddr.getPointer())) {
4245 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
4246 CRE->getType());
4247 return;
4248 }
4249
4250 // Create the temporary.
4251 Address temp =
4252 CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp");
4253 // Loading an l-value can introduce a cleanup if the l-value is __weak,
4254 // and that cleanup will be conditional if we can't prove that the l-value
4255 // isn't null, so we need to register a dominating point so that the cleanups
4256 // system will make valid IR.
4257 CodeGenFunction::ConditionalEvaluation condEval(CGF);
4258
4259 // Zero-initialize it if we're not doing a copy-initialization.
4260 bool shouldCopy = CRE->shouldCopy();
4261 if (!shouldCopy) {
4262 llvm::Value *null =
4263 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
4264 CGF.Builder.CreateStore(null, temp);
4265 }
4266
4267 llvm::BasicBlock *contBB = nullptr;
4268 llvm::BasicBlock *originBB = nullptr;
4269
4270 // If the address is *not* known to be non-null, we need to switch.
4271 llvm::Value *finalArgument;
4272
4273 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
4274 CGF.CGM.getDataLayout());
4275 if (provablyNonNull) {
4276 finalArgument = temp.getPointer();
4277 } else {
4278 llvm::Value *isNull =
4279 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
4280
4281 finalArgument = CGF.Builder.CreateSelect(isNull,
4282 llvm::ConstantPointerNull::get(destType),
4283 temp.getPointer(), "icr.argument");
4284
4285 // If we need to copy, then the load has to be conditional, which
4286 // means we need control flow.
4287 if (shouldCopy) {
4288 originBB = CGF.Builder.GetInsertBlock();
4289 contBB = CGF.createBasicBlock("icr.cont");
4290 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
4291 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
4292 CGF.EmitBlock(copyBB);
4293 condEval.begin(CGF);
4294 }
4295 }
4296
4297 llvm::Value *valueToUse = nullptr;
4298
4299 // Perform a copy if necessary.
4300 if (shouldCopy) {
4301 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
4302 assert(srcRV.isScalar());
4303
4304 llvm::Value *src = srcRV.getScalarVal();
4305 src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");
4306
4307 // Use an ordinary store, not a store-to-lvalue.
4308 CGF.Builder.CreateStore(src, temp);
4309
4310 // If optimization is enabled, and the value was held in a
4311 // __strong variable, we need to tell the optimizer that this
4312 // value has to stay alive until we're doing the store back.
4313 // This is because the temporary is effectively unretained,
4314 // and so otherwise we can violate the high-level semantics.
4315 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4316 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
4317 valueToUse = src;
4318 }
4319 }
4320
4321 // Finish the control flow if we needed it.
4322 if (shouldCopy && !provablyNonNull) {
4323 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
4324 CGF.EmitBlock(contBB);
4325
4326 // Make a phi for the value to intrinsically use.
4327 if (valueToUse) {
4328 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
4329 "icr.to-use");
4330 phiToUse->addIncoming(valueToUse, copyBB);
4331 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
4332 originBB);
4333 valueToUse = phiToUse;
4334 }
4335
4336 condEval.end(CGF);
4337 }
4338
4339 args.addWriteback(srcLV, temp, valueToUse);
4340 args.add(RValue::get(finalArgument), CRE->getType());
4341}
4342
4343 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
4344 assert(!StackBase);
4345
4346 // Save the stack.
4347 StackBase = CGF.Builder.CreateStackSave("inalloca.save");
4348}
4349
4350 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
4351 if (StackBase) {
4352 // Restore the stack after the call.
4353 CGF.Builder.CreateStackRestore(StackBase);
4354 }
4355}
4356
4357 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
4358 SourceLocation ArgLoc,
4359 AbstractCallee AC,
4360 unsigned ParmNum) {
4361 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
4362 SanOpts.has(SanitizerKind::NullabilityArg)))
4363 return;
4364
4365 // The param decl may be missing in a variadic function.
4366 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
4367 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4368
4369 // Prefer the nonnull attribute if it's present.
4370 const NonNullAttr *NNAttr = nullptr;
4371 if (SanOpts.has(SanitizerKind::NonnullAttribute))
4372 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
4373
4374 bool CanCheckNullability = false;
4375 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
4376 auto Nullability = PVD->getType()->getNullability();
4377 CanCheckNullability = Nullability &&
4378 *Nullability == NullabilityKind::NonNull &&
4379 PVD->getTypeSourceInfo();
4380 }
4381
4382 if (!NNAttr && !CanCheckNullability)
4383 return;
4384
4385 SourceLocation AttrLoc;
4386 SanitizerMask CheckKind;
4387 SanitizerHandler Handler;
4388 if (NNAttr) {
4389 AttrLoc = NNAttr->getLocation();
4390 CheckKind = SanitizerKind::NonnullAttribute;
4391 Handler = SanitizerHandler::NonnullArg;
4392 } else {
4393 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4394 CheckKind = SanitizerKind::NullabilityArg;
4395 Handler = SanitizerHandler::NullabilityArg;
4396 }
4397
4398 SanitizerScope SanScope(this);
4399 llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
4400 llvm::Constant *StaticData[] = {
4401 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
4402 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
4403 };
4404 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
4405}
4406
4407// Check if the call is going to use the inalloca convention. This needs to
4408// agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
4409// later, so we can't check it directly.
4410static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
4411 ArrayRef<QualType> ArgTypes) {
4412 // The Swift calling conventions don't go through the target-specific
4413 // argument classification, they never use inalloca.
4414 // TODO: Consider limiting inalloca use to only calling conventions supported
4415 // by MSVC.
4416 if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync)
4417 return false;
4418 if (!CGM.getTarget().getCXXABI().isMicrosoft())
4419 return false;
4420 return llvm::any_of(ArgTypes, [&](QualType Ty) {
4421 return isInAllocaArgument(CGM.getCXXABI(), Ty);
4422 });
4423}
4424
4425#ifndef NDEBUG
4426// Determine whether the given argument is an Objective-C method
4427// that may have type parameters in its signature.
4428static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4429 const DeclContext *dc = method->getDeclContext();
4430 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
4431 return classDecl->getTypeParamListAsWritten();
4432 }
4433
4434 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4435 return catDecl->getTypeParamList();
4436 }
4437
4438 return false;
4439}
4440#endif
4441
4442 /// EmitCallArgs - Emit call arguments for a function.
4443 void CodeGenFunction::EmitCallArgs(
4444 CallArgList &Args, PrototypeWrapper Prototype,
4445 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4446 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
4447 SmallVector<QualType, 16> ArgTypes;
4448
4449 assert((ParamsToSkip == 0 || Prototype.P) &&
4450 "Can't skip parameters if type info is not provided");
4451
4452 // This variable only captures *explicitly* written conventions, not those
4453 // applied by default via command line flags or target defaults, such as
4454 // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
4455 // require knowing if this is a C++ instance method or being able to see
4456 // unprototyped FunctionTypes.
4457 CallingConv ExplicitCC = CC_C;
4458
4459 // First, if a prototype was provided, use those argument types.
4460 bool IsVariadic = false;
4461 if (Prototype.P) {
4462 const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
4463 if (MD) {
4464 IsVariadic = MD->isVariadic();
4465 ExplicitCC = getCallingConventionForDecl(
4466 MD, CGM.getTarget().getTriple().isOSWindows());
4467 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4468 MD->param_type_end());
4469 } else {
4470 const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
4471 IsVariadic = FPT->isVariadic();
4472 ExplicitCC = FPT->getExtInfo().getCC();
4473 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4474 FPT->param_type_end());
4475 }
4476
4477#ifndef NDEBUG
4478 // Check that the prototyped types match the argument expression types.
4479 bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
4480 CallExpr::const_arg_iterator Arg = ArgRange.begin();
4481 for (QualType Ty : ArgTypes) {
4482 assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4483 assert(
4484 (isGenericMethod || Ty->isVariablyModifiedType() ||
4485 Ty.getNonReferenceType()->isObjCRetainableType() ||
4486 getContext()
4487 .getCanonicalType(Ty.getNonReferenceType())
4488 .getTypePtr() ==
4489 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4490 "type mismatch in call argument!");
4491 ++Arg;
4492 }
4493
4494 // Either we've emitted all the call args, or we have a call to a
4495 // variadic function.
4496 assert((Arg == ArgRange.end() || IsVariadic) &&
4497 "Extra arguments in non-variadic function!");
4498#endif
4499 }
4500
4501 // If we still have any arguments, emit them using the type of the argument.
4502 for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4503 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4504 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4505
4506 // We must evaluate arguments from right to left in the MS C++ ABI,
4507 // because arguments are destroyed left to right in the callee. As a special
4508 // case, there are certain language constructs that require left-to-right
4509 // evaluation, and in those cases we consider the evaluation order requirement
4510 // to trump the "destruction order is reverse construction order" guarantee.
4511 bool LeftToRight =
4512 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4513 ? Order == EvaluationOrder::ForceLeftToRight
4514 : Order != EvaluationOrder::ForceRightToLeft;
4515
4516 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4517 RValue EmittedArg) {
4518 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4519 return;
4520 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4521 if (PS == nullptr)
4522 return;
4523
4524 const auto &Context = getContext();
4525 auto SizeTy = Context.getSizeType();
4526 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4527 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4528 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4529 EmittedArg.getScalarVal(),
4530 PS->isDynamic());
4531 Args.add(RValue::get(V), SizeTy);
4532 // If we're emitting args in reverse, be sure to do so with
4533 // pass_object_size, as well.
4534 if (!LeftToRight)
4535 std::swap(Args.back(), *(&Args.back() - 1));
4536 };
4537
4538 // Insert a stack save if we're going to need any inalloca args.
4539 if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4540 assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4541 "inalloca only supported on x86");
4542 Args.allocateArgumentMemory(*this);
4543 }
4544
4545 // Evaluate each argument in the appropriate order.
4546 size_t CallArgsStart = Args.size();
4547 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4548 unsigned Idx = LeftToRight ? I : E - I - 1;
4549 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4550 unsigned InitialArgSize = Args.size();
4551 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4552 // the argument and parameter match or the objc method is parameterized.
4553 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4554 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4555 ArgTypes[Idx]) ||
4556 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4557 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4558 "Argument and parameter types don't match");
4559 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4560 // In particular, we depend on it being the last arg in Args, and the
4561 // objectsize bits depend on there only being one arg if !LeftToRight.
4562 assert(InitialArgSize + 1 == Args.size() &&
4563 "The code below depends on only adding one arg per EmitCallArg");
4564 (void)InitialArgSize;
4565 // Since pointer arguments are never emitted as LValues, it is safe to
4566 // emit the non-null argument check for r-values only.
4567 if (!Args.back().hasLValue()) {
4568 RValue RVArg = Args.back().getKnownRValue();
4569 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4570 ParamsToSkip + Idx);
4571 // @llvm.objectsize should never have side-effects and shouldn't need
4572 // destruction/cleanups, so we can safely "emit" it after its arg,
4573 // regardless of right-to-leftness
4574 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4575 }
4576 }
4577
4578 if (!LeftToRight) {
4579 // Un-reverse the arguments we just evaluated so they match up with the LLVM
4580 // IR function.
4581 std::reverse(Args.begin() + CallArgsStart, Args.end());
4582 }
4583}
4584
4585namespace {
4586
4587struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4588 DestroyUnpassedArg(Address Addr, QualType Ty)
4589 : Addr(Addr), Ty(Ty) {}
4590
4591 Address Addr;
4592 QualType Ty;
4593
4594 void Emit(CodeGenFunction &CGF, Flags flags) override {
4595 QualType::DestructionKind DtorKind = Ty.isDestructedType();
4596 if (DtorKind == QualType::DK_cxx_destructor) {
4597 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
4598 assert(!Dtor->isTrivial());
4599 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
4600 /*Delegating=*/false, Addr, Ty);
4601 } else {
4602 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
4603 }
4604 }
4605};
4606
4607struct DisableDebugLocationUpdates {
4608 CodeGenFunction &CGF;
4609 bool disabledDebugInfo;
4610 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
4611 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
4612 CGF.disableDebugInfo();
4613 }
4614 ~DisableDebugLocationUpdates() {
4615 if (disabledDebugInfo)
4616 CGF.enableDebugInfo();
4617 }
4618};
4619
4620} // end anonymous namespace
4621
4622 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
4623 if (!HasLV)
4624 return RV;
4625 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
4626 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
4627 LV.isVolatile());
4628 IsUsed = true;
4629 return RValue::getAggregate(Copy.getAddress(CGF));
4630}
4631
4632 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
4633 LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
4634 if (!HasLV && RV.isScalar())
4635 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
4636 else if (!HasLV && RV.isComplex())
4637 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
4638 else {
4639 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
4640 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
4641 // We assume that call args are never copied into subobjects.
4642 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
4643 HasLV ? LV.isVolatileQualified()
4644 : RV.isVolatileQualified());
4645 }
4646 IsUsed = true;
4647}
4648
4649void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
4650 QualType type) {
4651 DisableDebugLocationUpdates Dis(*this, E);
4652 if (const ObjCIndirectCopyRestoreExpr *CRE
4653 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4654 assert(getLangOpts().ObjCAutoRefCount);
4655 return emitWritebackArg(*this, args, CRE);
4656 }
4657
4658 assert(type->isReferenceType() == E->isGLValue() &&
4659 "reference binding to unmaterialized r-value!");
4660
4661 if (E->isGLValue()) {
4662 assert(E->getObjectKind() == OK_Ordinary);
4663 return args.add(EmitReferenceBindingToExpr(E), type);
4664 }
4665
4666 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
4667
4668 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
4669 // However, we still have to push an EH-only cleanup in case we unwind before
4670 // we make it to the call.
4671 if (type->isRecordType() &&
4673 // If we're using inalloca, use the argument memory. Otherwise, use a
4674 // temporary.
4675 AggValueSlot Slot = args.isUsingInAlloca()
4676 ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp");
4677
4678 bool DestroyedInCallee = true, NeedsEHCleanup = true;
4679 if (const auto *RD = type->getAsCXXRecordDecl())
4680 DestroyedInCallee = RD->hasNonTrivialDestructor();
4681 else
4682 NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
4683
4684 if (DestroyedInCallee)
4685 Slot.setExternallyDestructed();
4686
4687 EmitAggExpr(E, Slot);
4688 RValue RV = Slot.asRValue();
4689 args.add(RV, type);
4690
4691 if (DestroyedInCallee && NeedsEHCleanup) {
4692 // Create a no-op GEP between the placeholder and the cleanup so we can
4693 // RAUW it successfully. It also serves as a marker of the first
4694 // instruction where the cleanup is active.
4695 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
4696 type);
4697 // This unreachable is a temporary marker which will be removed later.
4698 llvm::Instruction *IsActive = Builder.CreateUnreachable();
4699 args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive);
4700 }
4701 return;
4702 }
4703
4704 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4705 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
4706 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
4707 assert(L.isSimple());
4708 args.addUncopiedAggregate(L, type);
4709 return;
4710 }
4711
4712 args.add(EmitAnyExprToTemp(E), type);
4713}
4714
4715QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4716 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
4717 // implicitly widens null pointer constants that are arguments to varargs
4718 // functions to pointer-sized ints.
4719 if (!getTarget().getTriple().isOSWindows())
4720 return Arg->getType();
4721
4722 if (Arg->getType()->isIntegerType() &&
4723 getContext().getTypeSize(Arg->getType()) <
4724 getContext().getTargetInfo().getPointerWidth(LangAS::Default) &&
4725 Arg->isNullPointerConstant(getContext(),
4726 Expr::NPC_ValueDependentIsNotNull)) {
4727 return getContext().getIntPtrType();
4728 }
4729
4730 return Arg->getType();
4731}
4732
4733// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4734// optimizer it can aggressively ignore unwind edges.
4735void
4736CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4737 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4738 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
4739 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4740 llvm::MDNode::get(getLLVMContext(), std::nullopt));
4741}
4742
4743/// Emits a call to the given no-arguments nounwind runtime function.
4744llvm::CallInst *
4745CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4746 const llvm::Twine &name) {
4747 return EmitNounwindRuntimeCall(callee, std::nullopt, name);
4748}
4749
4750/// Emits a call to the given nounwind runtime function.
4751llvm::CallInst *
4752 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4753 ArrayRef<llvm::Value *> args,
4754 const llvm::Twine &name) {
4755 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4756 call->setDoesNotThrow();
4757 return call;
4758}
4759
4760/// Emits a simple call (never an invoke) to the given no-arguments
4761/// runtime function.
4762llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4763 const llvm::Twine &name) {
4764 return EmitRuntimeCall(callee, std::nullopt, name);
4765}
4766
4767// Calls which may throw must have operand bundles indicating which funclet
4768// they are nested within.
4770CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
4771 // There is no need for a funclet operand bundle if we aren't inside a
4772 // funclet.
4773 if (!CurrentFuncletPad)
4774 return (SmallVector<llvm::OperandBundleDef, 1>());
4775
4776 // Skip intrinsics which cannot throw (as long as they don't lower into
4777 // regular function calls in the course of IR transformations).
4778 if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
4779 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
4780 auto IID = CalleeFn->getIntrinsicID();
4781 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
4782 return (SmallVector<llvm::OperandBundleDef, 1>());
4783 }
4784 }
4785
4787 BundleList.emplace_back("funclet", CurrentFuncletPad);
4788 return BundleList;
4789}
4790
4791/// Emits a simple call (never an invoke) to the given runtime function.
4792 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4793 ArrayRef<llvm::Value *> args,
4794 const llvm::Twine &name) {
4795 llvm::CallInst *call = Builder.CreateCall(
4796 callee, args, getBundlesForFunclet(callee.getCallee()), name);
4797 call->setCallingConv(getRuntimeCC());
4798 return call;
4799}
4800
4801/// Emits a call or invoke to the given noreturn runtime function.
4803 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
4804  SmallVector<llvm::OperandBundleDef, 1> BundleList =
4805 getBundlesForFunclet(callee.getCallee());
4806
4807 if (getInvokeDest()) {
4808 llvm::InvokeInst *invoke =
4809 Builder.CreateInvoke(callee,
4810                           getUnreachableBlock(),
4811 getInvokeDest(),
4812 args,
4813 BundleList);
4814 invoke->setDoesNotReturn();
4815 invoke->setCallingConv(getRuntimeCC());
4816 } else {
4817 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4818 call->setDoesNotReturn();
4819 call->setCallingConv(getRuntimeCC());
4820 Builder.CreateUnreachable();
4821 }
4822}
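// Illustrative lowering (sketch) for a noreturn runtime call such as abort:
//
//   ; without an invoke destination:
//   call void @abort()
//   unreachable
//
//   ; with an invoke destination:
//   invoke void @abort()
//           to label %unreachable unwind label %eh.dest
//
// Block names here are illustrative only.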
4823
4824/// Emits a call or invoke instruction to the given nullary runtime function.
4825llvm::CallBase *
4826CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4827 const Twine &name) {
4828 return EmitRuntimeCallOrInvoke(callee, std::nullopt, name);
4829}
4830
4831/// Emits a call or invoke instruction to the given runtime function.
4832llvm::CallBase *
4833CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4834                                         ArrayRef<llvm::Value *> args,
4835 const Twine &name) {
4836 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4837 call->setCallingConv(getRuntimeCC());
4838 return call;
4839}
4840
4841/// Emits a call or invoke instruction to the given function, depending
4842/// on the current state of the EH stack.
4843llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4844                                                  ArrayRef<llvm::Value *> Args,
4845 const Twine &Name) {
4846 llvm::BasicBlock *InvokeDest = getInvokeDest();
4847  SmallVector<llvm::OperandBundleDef, 1> BundleList =
4848 getBundlesForFunclet(Callee.getCallee());
4849
4850 llvm::CallBase *Inst;
4851 if (!InvokeDest)
4852 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4853 else {
4854 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4855 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4856 Name);
4857 EmitBlock(ContBB);
4858 }
4859
4860 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4861 // optimizer it can aggressively ignore unwind edges.
4862 if (CGM.getLangOpts().ObjCAutoRefCount)
4863 AddObjCARCExceptionMetadata(Inst);
4864
4865 return Inst;
4866}
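// Illustrative result (sketch): inside an EH scope this emits
//
//   invoke void @f() to label %invoke.cont unwind label %lpad
// invoke.cont:
//   ; normal path resumes here
//
// and a plain `call void @f()` otherwise.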
4867
4868void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4869 llvm::Value *New) {
4870 DeferredReplacements.push_back(
4871 std::make_pair(llvm::WeakTrackingVH(Old), New));
4872}
4873
4874namespace {
4875
4876/// Specify the given \p NewAlign as the alignment of the return value
4877/// attribute. If such an attribute already exists, reset it to the larger of the two.
4878[[nodiscard]] llvm::AttributeList
4879maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4880 const llvm::AttributeList &Attrs,
4881 llvm::Align NewAlign) {
4882 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4883 if (CurAlign >= NewAlign)
4884 return Attrs;
4885 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4886 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
4887 .addRetAttribute(Ctx, AlignAttr);
4888}
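// Example: if Attrs already carries `align 8` on the return value and
// NewAlign is 16, the returned list carries `align 16`; a NewAlign of 4
// would leave the existing `align 8` untouched.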
4889
4890template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4891protected:
4892 CodeGenFunction &CGF;
4893
4894 /// We do nothing if this is, or becomes, nullptr.
4895 const AlignedAttrTy *AA = nullptr;
4896
4897 llvm::Value *Alignment = nullptr; // May or may not be a constant.
4898 llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4899
4900 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4901 : CGF(CGF_) {
4902 if (!FuncDecl)
4903 return;
4904 AA = FuncDecl->getAttr<AlignedAttrTy>();
4905 }
4906
4907public:
4908 /// If we can, materialize the alignment as an attribute on return value.
4909 [[nodiscard]] llvm::AttributeList
4910 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4911 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4912 return Attrs;
4913 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4914 if (!AlignmentCI)
4915 return Attrs;
4916 // We may legitimately have non-power-of-2 alignment here.
4917 // If so, this is UB land, emit it via `@llvm.assume` instead.
4918 if (!AlignmentCI->getValue().isPowerOf2())
4919 return Attrs;
4920 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4921 CGF.getLLVMContext(), Attrs,
4922 llvm::Align(
4923 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4924 AA = nullptr; // We're done. Disallow doing anything else.
4925 return NewAttrs;
4926 }
4927
4928 /// Emit alignment assumption.
4929  /// This is a general fallback that we take if there is an offset, the
4930  /// alignment is variable, or we are sanitizing for alignment.
4931 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4932 if (!AA)
4933 return;
4934 CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4935 AA->getLocation(), Alignment, OffsetCI);
4936 AA = nullptr; // We're done. Disallow doing anything else.
4937 }
4938};
4939
4940/// Helper data structure to emit `AssumeAlignedAttr`.
4941class AssumeAlignedAttrEmitter final
4942 : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4943public:
4944 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4945 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4946 if (!AA)
4947 return;
4948 // It is guaranteed that the alignment/offset are constants.
4949 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4950 if (Expr *Offset = AA->getOffset()) {
4951 OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4952 if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4953 OffsetCI = nullptr;
4954 }
4955 }
4956};
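// Illustrative source-level trigger (not from this file):
//
//   void *my_alloc(unsigned n) __attribute__((assume_aligned(64)));
//
// Alignment is then the constant 64 with no offset, so it can typically be
// emitted as an `align` return attribute at the call site.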
4957
4958/// Helper data structure to emit `AllocAlignAttr`.
4959class AllocAlignAttrEmitter final
4960 : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4961public:
4962 AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4963 const CallArgList &CallArgs)
4964 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4965 if (!AA)
4966 return;
4967 // Alignment may or may not be a constant, and that is okay.
4968 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4969 .getRValue(CGF)
4970 .getScalarVal();
4971 }
4972};
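// Illustrative source-level trigger (not from this file):
//
//   void *my_aligned_alloc(unsigned align, unsigned n)
//       __attribute__((alloc_align(1)));
//
// Alignment comes from the first call argument, so it may be a runtime
// value; non-constant alignments are emitted via @llvm.assume instead of a
// call-site attribute.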
4973
4974} // namespace
4975
4976static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
4977 if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
4978 return VT->getPrimitiveSizeInBits().getKnownMinValue();
4979 if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
4980 return getMaxVectorWidth(AT->getElementType());
4981
4982 unsigned MaxVectorWidth = 0;
4983 if (auto *ST = dyn_cast<llvm::StructType>(Ty))
4984 for (auto *I : ST->elements())
4985 MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I));
4986 return MaxVectorWidth;
4987}
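// Example: for the LLVM type { <4 x float>, [2 x <8 x i16>] } both members
// contribute 128-bit vectors, so getMaxVectorWidth returns 128.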
4988
4989RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4990 const CGCallee &Callee,
4991 ReturnValueSlot ReturnValue,
4992 const CallArgList &CallArgs,
4993 llvm::CallBase **callOrInvoke, bool IsMustTail,
4994 SourceLocation Loc) {
4995 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4996
4997 assert(Callee.isOrdinary() || Callee.isVirtual());
4998
4999 // Handle struct-return functions by passing a pointer to the
5000 // location that we would like to return into.
5001 QualType RetTy = CallInfo.getReturnType();
5002 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
5003
5004 llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
5005
5006 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
5007 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5008 // We can only guarantee that a function is called from the correct
5009 // context/function based on the appropriate target attributes,
5010 // so only check in the case where we have both always_inline and target
5011 // since otherwise we could be making a conditional call after a check for
5012 // the proper cpu features (and it won't cause code generation issues due to
5013 // function based code generation).
5014 if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
5015 (TargetDecl->hasAttr<TargetAttr>() ||
5016 (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>())))
5017 checkTargetFeatures(Loc, FD);
5018
5019 // Some architectures (such as x86-64) have the ABI changed based on
5020 // attribute-target/features. Give them a chance to diagnose.
5021    CGM.getTargetCodeGenInfo().checkFunctionCallABI(
5022 CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
5023 }
5024
5025 // 1. Set up the arguments.
5026
5027 // If we're using inalloca, insert the allocation after the stack save.
5028 // FIXME: Do this earlier rather than hacking it in here!
5029 Address ArgMemory = Address::invalid();
5030 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
5031 const llvm::DataLayout &DL = CGM.getDataLayout();
5032 llvm::Instruction *IP = CallArgs.getStackBase();
5033 llvm::AllocaInst *AI;
5034 if (IP) {
5035 IP = IP->getNextNode();
5036 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
5037 "argmem", IP);
5038 } else {
5039 AI = CreateTempAlloca(ArgStruct, "argmem");
5040 }
5041 auto Align = CallInfo.getArgStructAlignment();
5042 AI->setAlignment(Align.getAsAlign());
5043 AI->setUsedWithInAlloca(true);
5044 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5045 ArgMemory = Address(AI, ArgStruct, Align);
5046 }
5047
5048 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
5049 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
5050
5051 // If the call returns a temporary with struct return, create a temporary
5052 // alloca to hold the result, unless one is given to us.
5053 Address SRetPtr = Address::invalid();
5054 Address SRetAlloca = Address::invalid();
5055 llvm::Value *UnusedReturnSizePtr = nullptr;
5056 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
5057 if (!ReturnValue.isNull()) {
5058 SRetPtr = ReturnValue.getValue();
5059 } else {
5060 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
5061 if (HaveInsertPoint() && ReturnValue.isUnused()) {
5062 llvm::TypeSize size =
5063 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
5064 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
5065 }
5066 }
5067 if (IRFunctionArgs.hasSRetArg()) {
5068 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
5069 } else if (RetAI.isInAlloca()) {
5070 Address Addr =
5071 Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
5072 Builder.CreateStore(SRetPtr.getPointer(), Addr);
5073 }
5074 }
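// Illustrative sret lowering set up above (sketch; alignment made up):
//
//   %tmp = alloca %struct.S, align 4
//   call void @f(ptr sret(%struct.S) align 4 %tmp)
//
// The callee writes its result through the hidden pointer instead of
// returning it in registers.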
5075
5076 Address swiftErrorTemp = Address::invalid();
5077 Address swiftErrorArg = Address::invalid();
5078
5079 // When passing arguments using temporary allocas, we need to add the
5080 // appropriate lifetime markers. This vector keeps track of all the lifetime
5081 // markers that need to be ended right after the call.
5082 SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
5083
5084 // Translate all of the arguments as necessary to match the IR lowering.
5085 assert(CallInfo.arg_size() == CallArgs.size() &&
5086 "Mismatch between function signature & arguments.");
5087 unsigned ArgNo = 0;
5088 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
5089 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5090 I != E; ++I, ++info_it, ++ArgNo) {
5091 const ABIArgInfo &ArgInfo = info_it->info;
5092
5093 // Insert a padding argument to ensure proper alignment.
5094 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5095 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5096 llvm::UndefValue::get(ArgInfo.getPaddingType());
5097
5098 unsigned FirstIRArg, NumIRArgs;
5099 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5100
5101 bool ArgHasMaybeUndefAttr =
5102 IsArgumentMaybeUndef(TargetDecl, CallInfo.getNumRequiredArgs(), ArgNo);
5103
5104 switch (ArgInfo.getKind()) {
5105 case ABIArgInfo::InAlloca: {
5106 assert(NumIRArgs == 0);
5107 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
5108 if (I->isAggregate()) {
5109 Address Addr = I->hasLValue()
5110 ? I->getKnownLValue().getAddress(*this)
5111 : I->getKnownRValue().getAggregateAddress();
5112 llvm::Instruction *Placeholder =
5113 cast<llvm::Instruction>(Addr.getPointer());
5114
5115 if (!ArgInfo.getInAllocaIndirect()) {
5116 // Replace the placeholder with the appropriate argument slot GEP.
5117 CGBuilderTy::InsertPoint IP = Builder.saveIP();
5118 Builder.SetInsertPoint(Placeholder);
5119 Addr = Builder.CreateStructGEP(ArgMemory,
5120 ArgInfo.getInAllocaFieldIndex());
5121 Builder.restoreIP(IP);
5122 } else {
5123 // For indirect things such as overaligned structs, replace the
5124 // placeholder with a regular aggregate temporary alloca. Store the
5125 // address of this alloca into the struct.
5126 Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
5127          Address ArgSlot = Builder.CreateStructGEP(
5128 ArgMemory, ArgInfo.getInAllocaFieldIndex());
5129 Builder.CreateStore(Addr.getPointer(), ArgSlot);
5130 }
5131 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
5132 } else if (ArgInfo.getInAllocaIndirect()) {
5133 // Make a temporary alloca and store the address of it into the argument
5134 // struct.
5135        Address Addr = CreateMemTempWithoutCast(
5136 I->Ty, getContext().getTypeAlignInChars(I->Ty),
5137 "indirect-arg-temp");
5138 I->copyInto(*this, Addr);
5139 Address ArgSlot =
5140 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
5141 Builder.CreateStore(Addr.getPointer(), ArgSlot);
5142 } else {
5143 // Store the RValue into the argument struct.
5144 Address Addr =
5145 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
5146 Addr = Addr.withElementType(ConvertTypeForMem(I->Ty));
5147 I->copyInto(*this, Addr);
5148 }
5149 break;
5150 }
5151
5152    case ABIArgInfo::Indirect:
5153    case ABIArgInfo::IndirectAliased: {
5154 assert(NumIRArgs == 1);
5155 if (!I->isAggregate()) {
5156 // Make a temporary alloca to pass the argument.
5157        Address Addr = CreateMemTempWithoutCast(
5158 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
5159
5160 llvm::Value *Val = Addr.getPointer();
5161 if (ArgHasMaybeUndefAttr)
5162 Val = Builder.CreateFreeze(Addr.getPointer());
5163 IRCallArgs[FirstIRArg] = Val;
5164
5165 I->copyInto(*this, Addr);
5166 } else {
5167 // We want to avoid creating an unnecessary temporary+copy here;
5168 // however, we need one in three cases:
5169 // 1. If the argument is not byval, and we are required to copy the
5170 // source. (This case doesn't occur on any common architecture.)
5171 // 2. If the argument is byval, RV is not sufficiently aligned, and
5172 // we cannot force it to be sufficiently aligned.
5173 // 3. If the argument is byval, but RV is not located in default
5174 // or alloca address space.
5175 Address Addr = I->hasLValue()
5176 ? I->getKnownLValue().getAddress(*this)
5177 : I->getKnownRValue().getAggregateAddress();
5178 llvm::Value *V = Addr.getPointer();
5179 CharUnits Align = ArgInfo.getIndirectAlign();
5180 const llvm::DataLayout *TD = &CGM.getDataLayout();
5181
5182 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5183 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5184 TD->getAllocaAddrSpace()) &&
5185 "indirect argument must be in alloca address space");
5186
5187 bool NeedCopy = false;
5188 if (Addr.getAlignment() < Align &&
5189 llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
5190 Align.getAsAlign()) {
5191 NeedCopy = true;
5192 } else if (I->hasLValue()) {
5193 auto LV = I->getKnownLValue();
5194 auto AS = LV.getAddressSpace();
5195
5196 bool isByValOrRef =
5197 ArgInfo.isIndirectAliased() || ArgInfo.getIndirectByVal();
5198
5199 if (!isByValOrRef ||
5200 (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
5201 NeedCopy = true;
5202 }
5203 if (!getLangOpts().OpenCL) {
5204 if ((isByValOrRef &&
5205 (AS != LangAS::Default &&
5206 AS != CGM.getASTAllocaAddressSpace()))) {
5207 NeedCopy = true;
5208 }
5209 }
5210 // For OpenCL even if RV is located in default or alloca address space
5211 // we don't want to perform address space cast for it.
5212 else if ((isByValOrRef &&
5213 Addr.getType()->getAddressSpace() != IRFuncTy->
5214 getParamType(FirstIRArg)->getPointerAddressSpace())) {
5215 NeedCopy = true;
5216 }
5217 }
5218
5219 if (NeedCopy) {
5220 // Create an aligned temporary, and copy to it.
5221          Address AI = CreateMemTempWithoutCast(
5222 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
5223 llvm::Value *Val = AI.getPointer();
5224 if (ArgHasMaybeUndefAttr)
5225 Val = Builder.CreateFreeze(AI.getPointer());
5226 IRCallArgs[FirstIRArg] = Val;
5227
5228 // Emit lifetime markers for the temporary alloca.
5229 llvm::TypeSize ByvalTempElementSize =
5230 CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
5231 llvm::Value *LifetimeSize =
5232 EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
5233
5234 // Add cleanup code to emit the end lifetime marker after the call.
5235 if (LifetimeSize) // In case we disabled lifetime markers.
5236 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
5237
5238 // Generate the copy.
5239 I->copyInto(*this, AI);
5240 } else {
5241 // Skip the extra memcpy call.
5242 auto *T = llvm::PointerType::get(
5243 CGM.getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());
5244
5245 llvm::Value *Val = getTargetHooks().performAddrSpaceCast(
5246              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
5247 true);
5248 if (ArgHasMaybeUndefAttr)
5249 Val = Builder.CreateFreeze(Val);
5250 IRCallArgs[FirstIRArg] = Val;
5251 }
5252 }
5253 break;
5254 }
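// Illustrative byval lowering for the copy path above (sketch; alignment
// made up):
//
//   %byval-temp = alloca %struct.S, align 8
//   ; ...copy the argument into %byval-temp...
//   call void @f(ptr byval(%struct.S) align 8 %byval-temp)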
5255
5256 case ABIArgInfo::Ignore:
5257 assert(NumIRArgs == 0);
5258 break;
5259
5260 case ABIArgInfo::Extend:
5261 case ABIArgInfo::Direct: {
5262 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
5263 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
5264 ArgInfo.getDirectOffset() == 0) {
5265 assert(NumIRArgs == 1);
5266 llvm::Value *V;
5267 if (!I->isAggregate())
5268 V = I->getKnownRValue().getScalarVal();
5269 else
5270        V = Builder.CreateLoad(
5271 I->hasLValue() ? I->getKnownLValue().getAddress(*this)
5272 : I->getKnownRValue().getAggregateAddress());
5273
5274 // Implement swifterror by copying into a new swifterror argument.
5275 // We'll write back in the normal path out of the call.
5276 if (CallInfo.getExtParameterInfo(ArgNo).getABI()
5277              == ParameterABI::SwiftErrorResult) {
5278 assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
5279
5280 QualType pointeeTy = I->Ty->getPointeeType();
5281 swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy),
5282 getContext().getTypeAlignInChars(pointeeTy));
5283
5284 swiftErrorTemp =
5285 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
5286 V = swiftErrorTemp.getPointer();
5287 cast<llvm::AllocaInst>(V)->setSwiftError(true);
5288
5289 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
5290 Builder.CreateStore(errorValue, swiftErrorTemp);
5291 }
5292
5293 // We might have to widen integers, but we should never truncate.
5294 if (ArgInfo.getCoerceToType() != V->getType() &&
5295 V->getType()->isIntegerTy())
5296 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
5297
5298 // If the argument doesn't match, perform a bitcast to coerce it. This
5299 // can happen due to trivial type mismatches.
5300 if (FirstIRArg < IRFuncTy->getNumParams() &&
5301 V->getType() != IRFuncTy->getParamType(FirstIRArg))
5302 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
5303
5304 if (ArgHasMaybeUndefAttr)
5305 V = Builder.CreateFreeze(V);
5306 IRCallArgs[FirstIRArg] = V;
5307 break;
5308 }
5309
5310 llvm::StructType *STy =
5311 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
5312 if (STy && ArgInfo.isDirect() && !ArgInfo.getCanBeFlattened()) {
5313 llvm::Type *SrcTy = ConvertTypeForMem(I->Ty);
5314 [[maybe_unused]] llvm::TypeSize SrcTypeSize =
5315 CGM.getDataLayout().getTypeAllocSize(SrcTy);
5316 [[maybe_unused]] llvm::TypeSize DstTypeSize =
5317 CGM.getDataLayout().getTypeAllocSize(STy);
5318 if (STy->containsHomogeneousScalableVectorTypes()) {
5319 assert(SrcTypeSize == DstTypeSize &&
5320 "Only allow non-fractional movement of structure with "
5321 "homogeneous scalable vector type");
5322
5323 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5324 break;
5325 }
5326 }
5327
5328 // FIXME: Avoid the conversion through memory if possible.
5329 Address Src = Address::invalid();
5330 if (!I->isAggregate()) {
5331 Src = CreateMemTemp(I->Ty, "coerce");
5332 I->copyInto(*this, Src);
5333 } else {
5334 Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
5335 : I->getKnownRValue().getAggregateAddress();
5336 }
5337
5338 // If the value is offset in memory, apply the offset now.
5339 Src = emitAddressAtOffset(*this, Src, ArgInfo);
5340
5341 // Fast-isel and the optimizer generally like scalar values better than
5342 // FCAs, so we flatten them if this is safe to do for this argument.
5343 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
5344 llvm::Type *SrcTy = Src.getElementType();
5345 llvm::TypeSize SrcTypeSize =
5346 CGM.getDataLayout().getTypeAllocSize(SrcTy);
5347 llvm::TypeSize DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy);
5348 if (SrcTypeSize.isScalable()) {
5349 assert(STy->containsHomogeneousScalableVectorTypes() &&
5350 "ABI only supports structure with homogeneous scalable vector "
5351 "type");
5352 assert(SrcTypeSize == DstTypeSize &&
5353 "Only allow non-fractional movement of structure with "
5354 "homogeneous scalable vector type");
5355 assert(NumIRArgs == STy->getNumElements());
5356
5357 llvm::Value *StoredStructValue =
5358 Builder.CreateLoad(Src, Src.getName() + ".tuple");
5359 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5360 llvm::Value *Extract = Builder.CreateExtractValue(
5361 StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
5362 IRCallArgs[FirstIRArg + i] = Extract;
5363 }
5364 } else {
5365 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5366 uint64_t DstSize = DstTypeSize.getFixedValue();
5367
5368 // If the source type is smaller than the destination type of the
5369 // coerce-to logic, copy the source value into a temp alloca the size
5370 // of the destination type to allow loading all of it. The bits past
5371 // the source value are left undef.
5372 if (SrcSize < DstSize) {
5373 Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
5374 Src.getName() + ".coerce");
5375 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
5376 Src = TempAlloca;
5377 } else {
5378 Src = Src.withElementType(STy);
5379 }
5380
5381 assert(NumIRArgs == STy->getNumElements());
5382 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5383 Address EltPtr = Builder.CreateStructGEP(Src, i);
5384 llvm::Value *LI = Builder.CreateLoad(EltPtr);
5385 if (ArgHasMaybeUndefAttr)
5386 LI = Builder.CreateFreeze(LI);
5387 IRCallArgs[FirstIRArg + i] = LI;
5388 }
5389 }
5390 } else {
5391 // In the simple case, just pass the coerced loaded value.
5392 assert(NumIRArgs == 1);
5393 llvm::Value *Load =
5394 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
5395
5396 if (CallInfo.isCmseNSCall()) {
5397 // For certain parameter types, clear padding bits, as they may reveal
5398 // sensitive information.
5399 // Small struct/union types are passed as integer arrays.
5400 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5401 if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
5402 Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
5403 }
5404
5405 if (ArgHasMaybeUndefAttr)
5406 Load = Builder.CreateFreeze(Load);
5407 IRCallArgs[FirstIRArg] = Load;
5408 }
5409
5410 break;
5411 }
5412
5414 auto coercionType = ArgInfo.getCoerceAndExpandType();
5415 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
5416
5417 llvm::Value *tempSize = nullptr;
5418 Address addr = Address::invalid();
5419 Address AllocaAddr = Address::invalid();
5420 if (I->isAggregate()) {
5421 addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
5422 : I->getKnownRValue().getAggregateAddress();
5423
5424 } else {
5425 RValue RV = I->getKnownRValue();
5426 assert(RV.isScalar()); // complex should always just be direct
5427
5428 llvm::Type *scalarType = RV.getScalarVal()->getType();
5429 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
5430 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);
5431
5432 // Materialize to a temporary.
5433 addr = CreateTempAlloca(
5434 RV.getScalarVal()->getType(),
5435 CharUnits::fromQuantity(std::max(layout->getAlignment(), scalarAlign)),
5436 "tmp",
5437 /*ArraySize=*/nullptr, &AllocaAddr);
5438 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
5439
5440 Builder.CreateStore(RV.getScalarVal(), addr);
5441 }
5442
5443 addr = addr.withElementType(coercionType);
5444
5445 unsigned IRArgPos = FirstIRArg;
5446 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5447 llvm::Type *eltType = coercionType->getElementType(i);
5448 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5449 Address eltAddr = Builder.CreateStructGEP(addr, i);
5450 llvm::Value *elt = Builder.CreateLoad(eltAddr);
5451 if (ArgHasMaybeUndefAttr)
5452 elt = Builder.CreateFreeze(elt);
5453 IRCallArgs[IRArgPos++] = elt;
5454 }
5455 assert(IRArgPos == FirstIRArg + NumIRArgs);
5456
5457 if (tempSize) {
5458 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
5459 }
5460
5461 break;
5462 }
5463
5464 case ABIArgInfo::Expand: {
5465 unsigned IRArgPos = FirstIRArg;
5466 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5467 assert(IRArgPos == FirstIRArg + NumIRArgs);
5468 break;
5469 }
5470 }
5471 }
5472
5473 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
5474 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
5475
5476 // If we're using inalloca, set up that argument.
5477 if (ArgMemory.isValid()) {
5478 llvm::Value *Arg = ArgMemory.getPointer();
5479 assert(IRFunctionArgs.hasInallocaArg());
5480 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5481 }
5482
5483 // 2. Prepare the function pointer.
5484
5485 // If the callee is a bitcast of a non-variadic function to have a
5486 // variadic function pointer type, check to see if we can remove the
5487 // bitcast. This comes up with unprototyped functions.
5488 //
5489 // This makes the IR nicer, but more importantly it ensures that we
5490 // can inline the function at -O0 if it is marked always_inline.
5491 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5492 llvm::Value *Ptr) -> llvm::Function * {
5493 if (!CalleeFT->isVarArg())
5494 return nullptr;
5495
5496 // Get underlying value if it's a bitcast
5497 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5498 if (CE->getOpcode() == llvm::Instruction::BitCast)
5499 Ptr = CE->getOperand(0);
5500 }
5501
5502 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5503 if (!OrigFn)
5504 return nullptr;
5505
5506 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5507
5508 // If the original type is variadic, or if any of the component types
5509 // disagree, we cannot remove the cast.
5510 if (OrigFT->isVarArg() ||
5511 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5512 OrigFT->getReturnType() != CalleeFT->getReturnType())
5513 return nullptr;
5514
5515 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5516 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5517 return nullptr;
5518
5519 return OrigFn;
5520 };
5521
5522 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5523 CalleePtr = OrigFn;
5524 IRFuncTy = OrigFn->getFunctionType();
5525 }
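// Illustrative case (not from this file): with a K&R-style declaration,
//
//   void f();              // unprototyped declaration
//   void g(void) { f(); }  // call is emitted through a varargs type
//
// if f's definition takes no arguments, the component types agree, so the
// cast is dropped and the call is made directly; this keeps the callee
// inlinable at -O0 under always_inline.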
5526
5527 // 3. Perform the actual call.
5528
5529 // Deactivate any cleanups that we're supposed to do immediately before
5530 // the call.
5531 if (!CallArgs.getCleanupsToDeactivate().empty())
5532 deactivateArgCleanupsBeforeCall(*this, CallArgs);
5533
5534 // Assert that the arguments we computed match up. The IR verifier
5535 // will catch this, but this is a common enough source of problems
5536 // during IRGen changes that it's way better for debugging to catch
5537 // it ourselves here.
5538#ifndef NDEBUG
5539 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5540 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5541    // An inalloca argument can have a different type.
5542 if (IRFunctionArgs.hasInallocaArg() &&
5543 i == IRFunctionArgs.getInallocaArgNo())
5544 continue;
5545 if (i < IRFuncTy->getNumParams())
5546 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5547 }
5548#endif
5549
5550 // Update the largest vector width if any arguments have vector types.
5551 for (unsigned i = 0; i < IRCallArgs.size(); ++i)
5552 LargestVectorWidth = std::max(LargestVectorWidth,
5553 getMaxVectorWidth(IRCallArgs[i]->getType()));
5554
5555 // Compute the calling convention and attributes.
5556 unsigned CallingConv;
5557 llvm::AttributeList Attrs;
5558 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5559 Callee.getAbstractInfo(), Attrs, CallingConv,
5560 /*AttrOnCallSite=*/true,
5561 /*IsThunk=*/false);
5562
5563 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5564 if (FD->hasAttr<StrictFPAttr>())
5565 // All calls within a strictfp function are marked strictfp
5566 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5567
5568    // If -ffast-math is enabled and the function is guarded by
5569    // '__attribute__((optnone))', adjust the memory attribute so the BE emits the
5570 // library call instead of the intrinsic.
5571 if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath)
5572 CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(),
5573 Attrs);
5574 }
5575  // Add the call-site nomerge attribute if it exists.
5576  if (InNoMergeAttributedStmt)
5577 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
5578
5579  // Add the call-site noinline attribute if it exists.
5580  if (InNoInlineAttributedStmt)
5581 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5582
5583  // Add the call-site always_inline attribute if it exists.
5584  if (InAlwaysInlineAttributedStmt)
5585 Attrs =
5586 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5587
5588 // Apply some call-site-specific attributes.
5589 // TODO: work this into building the attribute set.
5590
5591 // Apply always_inline to all calls within flatten functions.
5592 // FIXME: should this really take priority over __try, below?
5593 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5594      !InNoInlineAttributedStmt &&
5595 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5596 Attrs =
5597 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5598 }
5599
5600 // Disable inlining inside SEH __try blocks.
5601 if (isSEHTryScope()) {
5602 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5603 }
5604
5605 // Decide whether to use a call or an invoke.
5606 bool CannotThrow;
5607  if (currentFunctionUsesSEHTry()) {
5608 // SEH cares about asynchronous exceptions, so everything can "throw."
5609 CannotThrow = false;
5610 } else if (isCleanupPadScope() &&
5611             EHPersonality::get(*this).isMSVCXXPersonality()) {
5612 // The MSVC++ personality will implicitly terminate the program if an
5613 // exception is thrown during a cleanup outside of a try/catch.
5614 // We don't need to model anything in IR to get this behavior.
5615 CannotThrow = true;
5616 } else {
5617 // Otherwise, nounwind call sites will never throw.
5618 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5619
5620 if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5621 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5622 CannotThrow = true;
5623 }
5624
5625 // If we made a temporary, be sure to clean up after ourselves. Note that we
5626 // can't depend on being inside of an ExprWithCleanups, so we need to manually
5627 // pop this cleanup later on. Being eager about this is OK, since this
5628 // temporary is 'invisible' outside of the callee.
5629 if (UnusedReturnSizePtr)
5630 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5631 UnusedReturnSizePtr);
5632
5633 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5634
5635  SmallVector<llvm::OperandBundleDef, 1> BundleList =
5636 getBundlesForFunclet(CalleePtr);
5637
5638 if (SanOpts.has(SanitizerKind::KCFI) &&
5639 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5640 EmitKCFIOperandBundle(ConcreteCallee, BundleList);
5641
5642 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5643 if (FD->hasAttr<StrictFPAttr>())
5644 // All calls within a strictfp function are marked strictfp
5645 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5646
5647 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5648 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5649
5650 AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5651 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5652
5653 // Emit the actual call/invoke instruction.
5654 llvm::CallBase *CI;
5655 if (!InvokeDest) {
5656 CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5657 } else {
5658 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
5659 CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5660 BundleList);
5661 EmitBlock(Cont);
5662 }
5663 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
5664 CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
5665    SetSqrtFPAccuracy(CI);
5666 }
5667 if (callOrInvoke)
5668 *callOrInvoke = CI;
5669
5670 // If this is within a function that has the guard(nocf) attribute and is an
5671 // indirect call, add the "guard_nocf" attribute to this call to indicate that
5672 // Control Flow Guard checks should not be added, even if the call is inlined.
5673 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5674 if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5675 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5676 Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf");
5677 }
5678 }
5679
5680 // Apply the attributes and calling convention.
5681 CI->setAttributes(Attrs);
5682 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5683
5684 // Apply various metadata.
5685
5686 if (!CI->getType()->isVoidTy())
5687 CI->setName("call");
5688
5689 // Update largest vector width from the return type.
5690 LargestVectorWidth =
5691 std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));
5692
5693 // Insert instrumentation or attach profile metadata at indirect call sites.
5694 // For more details, see the comment before the definition of
5695 // IPVK_IndirectCallTarget in InstrProfData.inc.
5696 if (!CI->getCalledFunction())
5697 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
5698 CI, CalleePtr);
5699
5700 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
5701 // optimizer it can aggressively ignore unwind edges.
5702 if (CGM.getLangOpts().ObjCAutoRefCount)
5703 AddObjCARCExceptionMetadata(CI);
5704
5705 // Set tail call kind if necessary.
5706 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5707 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5708 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5709 else if (IsMustTail)
5710 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5711 }
5712
5713 // Add metadata for calls to MSAllocator functions
5714 if (getDebugInfo() && TargetDecl &&
5715 TargetDecl->hasAttr<MSAllocatorAttr>())
5716    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
5717
5718 // Add metadata if calling an __attribute__((error(""))) or warning fn.
5719 if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
5720 llvm::ConstantInt *Line =
5721 llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
5722 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
5723 llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
5724 CI->setMetadata("srcloc", MDT);
5725 }
5726
5727 // 4. Finish the call.
5728
5729 // If the call doesn't return, finish the basic block and clear the
5730 // insertion point; this allows the rest of IRGen to discard
5731 // unreachable code.
5732 if (CI->doesNotReturn()) {
5733 if (UnusedReturnSizePtr)
5734      PopCleanupBlock();
5735
5736 // Strip away the noreturn attribute to better diagnose unreachable UB.
5737 if (SanOpts.has(SanitizerKind::Unreachable)) {
5738 // Also remove from function since CallBase::hasFnAttr additionally checks
5739 // attributes of the called function.
5740 if (auto *F = CI->getCalledFunction())
5741 F->removeFnAttr(llvm::Attribute::NoReturn);
5742 CI->removeFnAttr(llvm::Attribute::NoReturn);
5743
5744 // Avoid incompatibility with ASan which relies on the `noreturn`
5745 // attribute to insert handler calls.
5746 if (SanOpts.hasOneOf(SanitizerKind::Address |
5747 SanitizerKind::KernelAddress)) {
5748 SanitizerScope SanScope(this);
5749 llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5750 Builder.SetInsertPoint(CI);
5751 auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5752 llvm::FunctionCallee Fn =
5753 CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5754        EmitNounwindRuntimeCall(Fn);
5755 }
5756 }
5757
5758 EmitUnreachable(Loc);
5759 Builder.ClearInsertionPoint();
5760
5761    // FIXME: For now, emit a dummy basic block because expr emitters
5762 // generally are not ready to handle emitting expressions at unreachable
5763 // points.
5764    EnsureInsertPoint();
5765
5766 // Return a reasonable RValue.
5767 return GetUndefRValue(RetTy);
5768 }
5769
5770 // If this is a musttail call, return immediately. We do not branch to the
5771 // epilogue in this case.
5772 if (IsMustTail) {
5773 for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
5774 ++it) {
5775 EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
5776 if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
5777 CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
5778 }
5779 if (CI->getType()->isVoidTy())
5780 Builder.CreateRetVoid();
5781 else
5782 Builder.CreateRet(CI);
5783 Builder.ClearInsertionPoint();
5784    EnsureInsertPoint();
5785 return GetUndefRValue(RetTy);
5786 }
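// Illustrative source-level trigger (not from this file):
//
//   int g(int x) { [[clang::musttail]] return f(x); }
//
// The call above is emitted with TCK_MustTail and returns directly,
// bypassing the function epilogue.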
5787
5788 // Perform the swifterror writeback.
5789 if (swiftErrorTemp.isValid()) {
5790 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5791 Builder.CreateStore(errorResult, swiftErrorArg);
5792 }
5793
5794 // Emit any call-associated writebacks immediately. Arguably this
5795 // should happen after any return-value munging.
5796 if (CallArgs.hasWritebacks())
5797 emitWritebacks(*this, CallArgs);
5798
5799 // The stack cleanup for inalloca arguments has to run out of the normal
5800 // lexical order, so deactivate it and run it manually here.
5801 CallArgs.freeArgumentMemory(*this);
5802
5803 // Extract the return value.
5804 RValue Ret = [&] {
5805 switch (RetAI.getKind()) {
5806    case ABIArgInfo::CoerceAndExpand: {
5807 auto coercionType = RetAI.getCoerceAndExpandType();
5808
5809 Address addr = SRetPtr.withElementType(coercionType);
5810
5811 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5812 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5813
5814 unsigned unpaddedIndex = 0;
5815 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5816 llvm::Type *eltType = coercionType->getElementType(i);
5817 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5818 Address eltAddr = Builder.CreateStructGEP(addr, i);
5819 llvm::Value *elt = CI;
5820 if (requiresExtract)
5821 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5822 else
5823 assert(unpaddedIndex == 0);
5824 Builder.CreateStore(elt, eltAddr);
5825 }
5826 [[fallthrough]];
5827 }
5828
5829    case ABIArgInfo::InAlloca:
5830 case ABIArgInfo::Indirect: {
5831 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5832 if (UnusedReturnSizePtr)
5833        PopCleanupBlock();
5834 return ret;
5835 }
5836
5837 case ABIArgInfo::Ignore:
5838 // If we are ignoring an argument that had a result, make sure to
5839 // construct the appropriate return value for our caller.
5840 return GetUndefRValue(RetTy);
5841
5842 case ABIArgInfo::Extend:
5843 case ABIArgInfo::Direct: {
5844 llvm::Type *RetIRTy = ConvertType(RetTy);
5845 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5846 switch (getEvaluationKind(RetTy)) {
5847 case TEK_Complex: {
5848 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5849 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5850 return RValue::getComplex(std::make_pair(Real, Imag));
5851 }
5852 case TEK_Aggregate: {
5853 Address DestPtr = ReturnValue.getValue();
5854 bool DestIsVolatile = ReturnValue.isVolatile();
5855
5856 if (!DestPtr.isValid()) {
5857 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5858 DestIsVolatile = false;
5859 }
5860 EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5861 return RValue::getAggregate(DestPtr);
5862 }
5863 case TEK_Scalar: {
5864 // If the argument doesn't match, perform a bitcast to coerce it. This
5865 // can happen due to trivial type mismatches.
5866 llvm::Value *V = CI;
5867 if (V->getType() != RetIRTy)
5868 V = Builder.CreateBitCast(V, RetIRTy);
5869 return RValue::get(V);
5870 }
5871 }
5872 llvm_unreachable("bad evaluation kind");
5873 }
5874
5875 // If coercing a fixed vector from a scalable vector for ABI
5876 // compatibility, and the types match, use the llvm.vector.extract
5877 // intrinsic to perform the conversion.
5878 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
5879 llvm::Value *V = CI;
5880 if (auto *ScalableSrcTy =
5881 dyn_cast<llvm::ScalableVectorType>(V->getType())) {
5882 if (FixedDstTy->getElementType() == ScalableSrcTy->getElementType()) {
5883 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
5884 V = Builder.CreateExtractVector(FixedDstTy, V, Zero, "cast.fixed");
5885 return RValue::get(V);
5886 }
5887 }
5888 }
5889
5890 Address DestPtr = ReturnValue.getValue();
5891 bool DestIsVolatile = ReturnValue.isVolatile();
5892
5893 if (!DestPtr.isValid()) {
5894 DestPtr = CreateMemTemp(RetTy, "coerce");
5895 DestIsVolatile = false;
5896 }
5897
5898 // An empty record can overlap other data (if declared with
5899      // no_unique_address); omit the store for such types, as there is no
5900 // actual data to store.
5901 if (!isEmptyRecord(getContext(), RetTy, true)) {
5902 // If the value is offset in memory, apply the offset now.
5903 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5904 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5905 }
5906
5907 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5908 }
5909
5910 case ABIArgInfo::Expand:
5911    case ABIArgInfo::IndirectAliased:
5912 llvm_unreachable("Invalid ABI kind for return argument");
5913 }
5914
5915 llvm_unreachable("Unhandled ABIArgInfo::Kind");
5916 } ();
5917
5918 // Emit the assume_aligned check on the return value.
5919 if (Ret.isScalar() && TargetDecl) {
5920 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5921 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5922 }
5923
5924 // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
5925 // we can't use the full cleanup mechanism.
5926 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5927 LifetimeEnd.Emit(*this, /*Flags=*/{});
5928
5929 if (!ReturnValue.isExternallyDestructed() &&
5930      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
5931 pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
5932 RetTy);
5933
5934 return Ret;
5935}
5936
5937CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
5938 if (isVirtual()) {
5939 const CallExpr *CE = getVirtualCallExpr();
5940    return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
5941        CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
5942 CE ? CE->getBeginLoc() : SourceLocation());
5943 }
5944
5945 return *this;
5946}
5947
5948/* VarArg handling */
5949
5950RValue CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
5951 VAListAddr = VE->isMicrosoftABI()
5952                   ? EmitMSVAListRef(VE->getSubExpr())
5953 : EmitVAListRef(VE->getSubExpr());
5954 QualType Ty = VE->getType();
5955 if (VE->isMicrosoftABI())
5956 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
5957 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
5958}
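// Illustrative source-level trigger (not from this file):
//
//   int sum(int n, ...) {
//     va_list ap;
//     va_start(ap, n);
//     int s = 0;
//     for (int i = 0; i < n; ++i)
//       s += va_arg(ap, int);   // each va_arg reaches EmitVAArg
//     va_end(ap);
//     return s;
//   }
//
// On the Microsoft __builtin_ms_va_list path, EmitMSVAArg is used instead
// of the default ABIInfo::EmitVAArg.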
#define V(N, I)
Definition: ASTContext.h:3259
StringRef P
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:152
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:4042
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
Definition: CGCall.cpp:3719
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
Definition: CGCall.cpp:1157
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
Definition: CGCall.cpp:3478
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
Definition: CGCall.cpp:107
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
Definition: CGCall.cpp:2896
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1426
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:4047
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
Definition: CGCall.cpp:3598
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:379
static bool isProvablyNull(llvm::Value *addr)
Definition: CGCall.cpp:4117
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1762
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:3378
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
Definition: CGCall.cpp:4428
static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA=nullptr)
Definition: CGCall.cpp:2154
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
Definition: CGCall.cpp:4216
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from \TargetOpts and \F, and sets the result in \FuncAttr.
Definition: CGCall.cpp:2033
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
Definition: CGCall.cpp:585
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
Definition: CGCall.cpp:1262
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:395
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsWindows)
Definition: CGCall.cpp:209
static int getExpansionSize(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:988
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
Definition: CGCall.cpp:98
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
Definition: CGCall.cpp:2190
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:4205
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, llvm::DenormalMode FP32DenormalMode, llvm::AttrBuilder &FuncAttrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
Definition: CGCall.cpp:1861
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:4188
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
Definition: CGCall.cpp:4194
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
Definition: CGCall.cpp:2876
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store to.
Definition: CGCall.cpp:1362
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:387
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:185
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:122
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
Definition: CGCall.cpp:2263
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
Definition: CGCall.cpp:1881
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
Definition: CGCall.cpp:2285
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:1030
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
Definition: CGCall.cpp:2241
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
Definition: CGCall.cpp:4410
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:934
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
Definition: CGCall.cpp:3702
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:3535
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Definition: CGCall.cpp:287
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:3390
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1172
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
Definition: CGCall.cpp:3517
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
Definition: CGCall.cpp:4122
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
Definition: CGCall.cpp:1826
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
Definition: CGCall.cpp:1875
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1208
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
Definition: CGCall.cpp:1799
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
Definition: CGCall.cpp:4976
CodeGenFunction::ComplexPairTy ComplexPairTy
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
llvm::MachO::Target Target
Definition: MachO.h:40
static QualType getParamType(Sema &SemaRef, ArrayRef< ResultCandidate > Candidates, unsigned N)
Get the type of the Nth parameter from a given set of overload candidates.
static bool isInstanceMethod(const Decl *D)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2742
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod, bool IsBuiltin=false) const
Retrieves the default calling convention for the current target.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType VoidPtrTy
Definition: ASTContext.h:1113
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
Definition: ASTContext.h:2058
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
CanQualType IntTy
Definition: ASTContext.h:1095
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2315
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
Definition: ASTContext.h:1086
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:752
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const
Return number of constant array elements.
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Definition: ASTContext.h:2319
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3147
Attr - This represents one attribute.
Definition: Attr.h:42
const FunctionProtoType * getFunctionType() const
getFunctionType - Return the underlying function type for this block.
Definition: Expr.cpp:2488
This class is used for builtin types like 'int'.
Definition: Type.h:2740
Represents a base class of a C++ class.
Definition: DeclCXX.h:146
QualType getType() const
Retrieves the type of the base class.
Definition: DeclCXX.h:249
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2528
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2792
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2053
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
Definition: DeclCXX.cpp:2460
bool isVirtual() const
Definition: DeclCXX.h:2108
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition: DeclCXX.h:2179
Qualifiers getMethodQualifiers() const
Definition: DeclCXX.h:2214
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1974
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:633
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2819
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1618
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
bool isCanonicalAsParam() const
Determines if this canonical type is furthermore canonical as a parameter.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:83
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power-of-two 8-bit bytes.
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of any element in the array.
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
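The CharUnits entries above form a small byte-quantity arithmetic API. A minimal sketch of how they compose, assuming only the interface listed here; the 16- and 12-byte quantities are made-up example values and elementAlign is a hypothetical helper:

#include "clang/AST/CharUnits.h"
using clang::CharUnits;

// The minimum alignment of an arbitrary array element, given the alignment
// of the first element and the element size.
static CharUnits elementAlign(CharUnits ArrayAlign, CharUnits EltSize) {
  return ArrayAlign.alignmentOfArrayElement(EltSize);
}

static bool demo() {
  CharUnits Align = CharUnits::fromQuantity(16);
  CharUnits Size = CharUnits::fromQuantity(12);
  // The largest power of two dividing 12 is 4, so later elements are only
  // guaranteed 4-byte alignment even though the array is 16-byte aligned.
  return elementAlign(Align, Size).getQuantity() == 4 &&
         CharUnits::Zero().isZero();
}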
CodeGenOptions - Track various options which control how the code is optimized and passed to the backend.
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap instructions.
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to or returned from a function.
unsigned getInAllocaFieldIndex() const
llvm::StructType * getCoerceAndExpandType() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calling convention.
llvm::Type * getPaddingType() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenced.
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicates default alignment).
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another specified type.
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
CharUnits getIndirectAlign() const
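These kinds drive how every argument and return value is lowered. A hedged sketch of the dispatch pattern, using only accessors listed above; loweredType is a hypothetical helper, not part of the API:

#include "clang/CodeGen/CGFunctionInfo.h" // declares ABIArgInfo

// Return the IR type an argument is converted to, or null when it is not
// passed as a direct SSA value.
static llvm::Type *loweredType(const clang::CodeGen::ABIArgInfo &AI) {
  using clang::CodeGen::ABIArgInfo;
  switch (AI.getKind()) {
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
    return AI.getCoerceToType();   // normal converted or coerced type
  case ABIArgInfo::Indirect:
  case ABIArgInfo::IndirectAliased:
  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Ignore:
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
    return nullptr;                // passed in memory, expanded, or dropped
  }
  return nullptr;
}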
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target-dependent code to load a value of type Ty from the __builtin_ms_va_list pointed to by VAListAddr.
Definition: ABIInfo.cpp:42
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target-dependent code to load a value of type Ty from the va_list pointed to by VAListAddr.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
An aligned address.
Definition: Address.h:29
static Address invalid()
Definition: Address.h:46
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:78
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:62
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:100
llvm::Value * getPointer() const
Definition: Address.h:51
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:73
bool isValid() const
Definition: Address.h:47
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:57
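A short sketch of the Address accessors above; it assumes a live CodeGenFunction (for the Int8Ty cache member listed near the end of this index) and asBytes is a hypothetical helper:

#include "CodeGenFunction.h" // clang/lib/CodeGen internal header
#include <cassert>

// View an aggregate address as raw bytes without changing the pointer or
// its alignment; only the element type is swapped.
static clang::CodeGen::Address
asBytes(clang::CodeGen::CodeGenFunction &CGF, clang::CodeGen::Address Addr) {
  assert(Addr.isValid() && "expected a valid address");
  (void)Addr.getAlignment(); // static alignment, as a CharUnits quantity
  return Addr.withElementType(CGF.Int8Ty);
}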
An aggregate value slot.
Definition: CGValue.h:512
Address getAddress() const
Definition: CGValue.h:650
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:621
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:595
RValue asRValue() const
Definition: CGValue.h:674
const BlockExpr * BlockExpression
Definition: CGBlocks.h:277
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:97
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:259
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:172
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:71
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:297
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:89
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:149
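The CGBuilderTy helpers above wrap llvm::IRBuilder so that loads and stores carry an Address's alignment automatically. A hedged sketch of the usual GEP-then-load sequence; loadFirstField is hypothetical:

#include "CodeGenFunction.h" // clang/lib/CodeGen internal header

// Load field 0 of an aggregate temporary; CreateStructGEP computes the
// field's address with the correct element type and alignment.
static llvm::Value *loadFirstField(clang::CodeGen::CodeGenFunction &CGF,
                                   clang::CodeGen::Address Agg) {
  clang::CodeGen::Address F0 = CGF.Builder.CreateStructGEP(Agg, 0, "f0");
  return CGF.Builder.CreateLoad(F0, "f0.val");
}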
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:135
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'this' (only applies when called non-virtually for destructors).
Definition: CGCXXABI.h:127
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:162
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
Definition: CGCXXABI.h:392
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
Definition: CGCall.h:40
const GlobalDecl getCalleeDecl() const
Definition: CGCall.h:58
const FunctionProtoType * getCalleeFunctionProtoType() const
Definition: CGCall.h:55
All available information about a concrete callee.
Definition: CGCall.h:62
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Definition: CGCall.cpp:5937
bool isVirtual() const
Definition: CGCall.h:188
Address getThisAddress() const
Definition: CGCall.h:199
const CallExpr * getVirtualCallExpr() const
Definition: CGCall.h:191
llvm::Value * getFunctionPointer() const
Definition: CGCall.h:178
llvm::FunctionType * getVirtualFunctionType() const
Definition: CGCall.h:203
GlobalDecl getVirtualMethodDecl() const
Definition: CGCall.h:195
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:823
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ABI.
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
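A minimal sketch of walking a CGFunctionInfo, in the spirit of the prolog/epilog code this file implements; countIndirectArgs is a hypothetical helper:

#include "clang/CodeGen/CGFunctionInfo.h"

// Count arguments that the ABI passes via a hidden pointer.
static unsigned countIndirectArgs(const clang::CodeGen::CGFunctionInfo &FI) {
  unsigned N = 0;
  for (auto I = FI.arg_begin(), E = FI.arg_end(); I != E; ++I)
    if (I->info.getKind() == clang::CodeGen::ABIArgInfo::Indirect)
      ++N; // I->type is the source QualType, I->info its ABI treatment
  return N;
}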
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM types.
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:258
llvm::Instruction * getStackBase() const
Definition: CGCall.h:330
void addUncopiedAggregate(LValue LV, QualType type)
Definition: CGCall.h:284
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:317
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
Definition: CGCall.h:325
bool hasWritebacks() const
Definition: CGCall.h:308
void add(RValue rvalue, QualType type)
Definition: CGCall.h:282
bool isUsingInAlloca() const
Returns true if we're using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:335
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:4343
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:4350
writeback_const_range writebacks() const
Definition: CGCall.h:313
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:303
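A sketch of populating a CallArgList before handing it to EmitCall; it assumes an already-emitted scalar V, and uses ASTContext::IntTy purely as an example type (addIntArg is hypothetical):

#include "CGCall.h" // clang/lib/CodeGen internal header

static void addIntArg(clang::CodeGen::CodeGenFunction &CGF,
                      clang::CodeGen::CallArgList &Args, llvm::Value *V) {
  // Record both the emitted value and the type it was derived from; the
  // arranged CGFunctionInfo decides how it is ultimately passed.
  Args.add(clang::CodeGen::RValue::get(V), CGF.getContext().IntTy);
}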
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code.
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void PopCleanupBlock(bool FallThroughIsBranchThrough=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
SanitizerSet SanOpts
Sanitizers enabled for this function.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guaranteed to have the same type.
static bool hasScalarEvaluationKind(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference, this will return the address of the reference and not the address of the value stored in the reference.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr, otherwise inserts it at the current insertion point of the builder.
const CodeGen::CGBlockInfo * BlockInfo
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void callCStructDestructor(LValue Dst)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address of the lvalue, then loads the result as an rvalue, returning the rvalue.
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
JumpDest ReturnBlock
ReturnBlock - Unified return block.
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
const TargetInfo & getTarget() const
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our best estimate of the alignment of the pointee.
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if no aggregate location is provided.
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::BasicBlock * getInvokeDest()
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the provided arguments, and create a conditional branch to it.
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating to another one.
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handler.
Address EmitVAListRef(const Expr *E)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile)
Build all the stores needed to initialize an aggregate at Dest with the value Val.
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Address CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTempWithoutCast - Create a temporary memory object of the given type, with appropriate alignment, without casting it to the default address space.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the memory representation to the LLVM value representation.
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment, and cast it to the default address space.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns the address of the base class.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Type * ConvertType(QualType T)
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
CodeGenTypes & getTypes() const
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the LLVM value representation to the memory representation.
This class organizes the cross-function state that is used while generating LLVM code.
llvm::MDNode * getNoObjCARCExceptionsMetadata()
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
Definition: CGCall.cpp:1591
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
ObjCEntrypoints & getObjCEntrypoints() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
Definition: CGCall.cpp:1608
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
Definition: CGCall.cpp:1586
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
Definition: CGCall.cpp:2294
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:2322
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
Definition: CGCall.cpp:1581
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on Clang's target-configuration logic.
Definition: CGCall.cpp:2147
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is unused.
Definition: CGCall.cpp:1814
void valueProfile(CGBuilderTy &Builder, uint32_t ValueKind, llvm::Instruction *ValueSite, llvm::Value *ValuePtr)
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
Definition: CodeGenTypes.h:54
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function of the given abstract type.
Definition: CGCall.cpp:273
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:301
ASTContext & getContext() const
Definition: CodeGenTypes.h:108
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:760
const ABIInfo & getABIInfo() const
Definition: CodeGenTypes.h:109
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
Definition: CGCall.cpp:203
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e. ignoring method CVR qualification.
Definition: CGCall.cpp:84
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
Definition: CGCall.cpp:1625
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited constructor.
Definition: CGCall.cpp:318
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i.e. doesn't depend on an incomplete tag type).
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
Definition: CGCall.cpp:643
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:674
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
Definition: CGCall.cpp:527
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
unsigned getTargetAddressSpace(QualType T) const
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type Ty into its flattened component LLVM types, written out through the iterator TI.
Definition: CGCall.cpp:1008
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
Definition: CGCall.cpp:481
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:112
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:537
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
Definition: CGCall.cpp:554
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:696
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
Definition: CGCall.cpp:633
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:662
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type.
Definition: CGCall.cpp:457
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
Definition: CGCall.cpp:650
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert a Clang calling convention to the corresponding LLVM calling convention.
Definition: CGCall.cpp:50
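A tiny usage sketch of this mapping, assuming the function is reachable through the clang::CodeGen namespace as the entry suggests; checkStdCallMapping is hypothetical:

#include "llvm/IR/CallingConv.h"
#include <cassert>

static void checkStdCallMapping() {
  // __stdcall has a direct LLVM counterpart, so the mapping is one-to-one.
  unsigned CC =
      clang::CodeGen::ClangCallConvToLLVMCallConv(clang::CC_X86StdCall);
  assert(CC == llvm::CallingConv::X86_StdCall && "stdcall maps directly");
}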
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
Definition: CGCall.cpp:1752
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:413
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to the given Objective-C receiver.
Definition: CGCall.cpp:494
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:328
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
Definition: CGCall.cpp:563
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments.
Definition: CGCall.cpp:723
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
Definition: CGCall.cpp:716
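The arrange* entries above all feed the same two-step pipeline: build a CGFunctionInfo describing the ABI-level signature, then convert it to an llvm::FunctionType. A hedged sketch under those assumptions; lowerDecl is a hypothetical helper:

#include "CodeGenTypes.h" // clang/lib/CodeGen internal header

static llvm::FunctionType *lowerDecl(clang::CodeGen::CodeGenTypes &CGT,
                                     const clang::FunctionDecl *FD) {
  // Step 1: compute per-argument ABIArgInfo, the effective calling
  // convention, and the required-argument count for the declaration.
  const clang::CodeGen::CGFunctionInfo &FI =
      CGT.arrangeFunctionDeclaration(FD);
  // Step 2: materialize the concrete IR signature from that summary.
  return CGT.GetFunctionType(FI);
}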
A cleanup scope which generates the cleanup blocks lazily.
Definition: CGCleanup.h:240
EHScopeStack::Cleanup * getCleanup()
Definition: CGCleanup.h:363
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:141
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
iterator end() const
Returns an iterator pointing to the outermost EH scope.
Definition: CGCleanup.h:564
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
Definition: CGCleanup.h:584
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:352
LValue - This represents an lvalue reference.
Definition: CGValue.h:171
bool isBitField() const
Definition: CGValue.h:268
bool isSimple() const
Definition: CGValue.h:266
bool isVolatileQualified() const
Definition: CGValue.h:273
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:417
LangAS getAddressSpace() const
Definition: CGValue.h:329
CharUnits getAlignment() const
Definition: CGValue.h:331
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:350
bool isVolatile() const
Definition: CGValue.h:316
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:300
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:281
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:39
bool isScalar() const
Definition: CGValue.h:54
static RValue get(llvm::Value *V)
Definition: CGValue.h:89
static RValue getAggregate(Address addr, bool isVolatile=false)
Definition: CGValue.h:110
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:96
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:73
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:61
bool isComplex() const
Definition: CGValue.h:55
bool isVolatileQualified() const
Definition: CGValue.h:58
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:68
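A compact sketch of RValue's three shapes, using only the predicates and accessors above; scalarOrReal is hypothetical:

#include "CGValue.h" // clang/lib/CodeGen internal header

// Return the scalar value, the real half of a complex pair, or null for
// an aggregate (whose data lives behind getAggregateAddress()).
static llvm::Value *scalarOrReal(clang::CodeGen::RValue RV) {
  if (RV.isScalar())
    return RV.getScalarVal();
  if (RV.isComplex())
    return RV.getComplexVal().first;
  return nullptr;
}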
A class for recording the number of arguments that a function signature requires.
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional, non-formal arguments in play.
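A worked sketch under stated assumptions: for a variadic prototype with one implicit prefix argument (say, 'this'), the fixed-argument count is the parameter count plus one. fixedArgCount is a hypothetical helper:

#include "clang/CodeGen/CGFunctionInfo.h" // declares RequiredArgs

static unsigned fixedArgCount(const clang::FunctionProtoType *FPT) {
  auto Req =
      clang::CodeGen::RequiredArgs::forPrototypePlus(FPT, /*additional=*/1);
  // getNumRequiredArgs() is meaningful only when extra (variadic)
  // arguments are actually permitted beyond the fixed ones.
  return FPT->isVariadic() ? Req.getNumRequiredArgs()
                           : FPT->getNumParams() + 1;
}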
ReturnValueSlot - Contains the address where the return value of a function can be stored, and whether the address is volatile or is unused.
Definition: CGCall.h:356
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
Definition: TargetInfo.h:188
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Definition: TargetInfo.h:366
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Definition: TargetInfo.cpp:105
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
Definition: TargetInfo.cpp:132
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args) const
Any further codegen related checks that need to be done on a function call in a target specific manner.
Definition: TargetInfo.h:89
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use the variadic convention or the non-variadic convention.
Definition: TargetInfo.cpp:86
Complex values, per C99 6.2.5p11.
Definition: Type.h:2845
Represents the canonical version of C arrays with a specified constant size.
Definition: Type.h:3186
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's class.
Definition: DeclCXX.h:3691
DeclContext - This is used only as base class of specific decl types that can act as declaration contexts.
Definition: DeclBase.h:1446
Decl - This represents one declaration (or definition), e.g. a variable, typedef, function, struct, etc.
Definition: DeclBase.h:85
T * getAttr() const
Definition: DeclBase.h:578
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:564
DeclContext * getDeclContext()
Definition: DeclBase.h:453
bool hasAttr() const
Definition: DeclBase.h:582
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Decl.h:822
This represents one expression.
Definition: Expr.h:110
bool isGLValue() const
Definition: Expr.h:280
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3041
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition: Expr.h:825
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:444
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Definition: Expr.cpp:3904
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:3025
bool isBitField() const
Determines whether this field is a bitfield.
Definition: Decl.h:3116
bool isZeroLengthBitField(const ASTContext &Ctx) const
Is this a zero-length bit-field? Such bit-fields aren't really bit-fields at all and instead act as a separator between contiguous runs of other bit-fields.
Definition: Decl.cpp:4564
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
Definition: Decl.h:3119
Represents a function declaration or definition.
Definition: Decl.h:1959
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:2314
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Definition: Type.h:4154
Represents a prototype with parameter type info, e.g. 'int foo(int)' or 'int foo(void)'.
Definition: Type.h:4199
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:4458
unsigned getNumParams() const
Definition: Type.h:4432
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type; see AArch64SMETypeAttributes for their values.
Definition: Type.h:4637
bool isVariadic() const
Whether this function prototype is variadic.
Definition: Type.h:4555
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.h:4550
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:4620
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Definition: Type.h:4616
Wrapper for source info for functions.
Definition: TypeLoc.h:1402
A class which abstracts out some details necessary for making a call.
Definition: Type.h:3910
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:4025
CallingConv getCC() const
Definition: Type.h:3972
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:3991
bool getCmseNSCall() const
Definition: Type.h:3960
bool getNoCfCheck() const
Definition: Type.h:3962
unsigned getRegParm() const
Definition: Type.h:3965
bool getNoCallerSavedRegs() const
Definition: Type.h:3961
bool getHasRegParm() const
Definition: Type.h:3963
bool getNoReturn() const
Definition: Type.h:3958
bool getProducesResult() const
Definition: Type.h:3959
Interesting information about a specific parameter that can't simply be reflected in the parameter's type.
Definition: Type.h:3825
ParameterABI getABI() const
Return the ABI treatment of this parameter.
Definition: Type.h:3838
ExtParameterInfo withIsNoEscape(bool NoEscape) const
Definition: Type.h:3865
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3799
ExtInfo getExtInfo() const
Definition: Type.h:4128
static ArmStateValue getArmZT0State(unsigned AttrBits)
Definition: Type.h:4086
static ArmStateValue getArmZAState(unsigned AttrBits)
Definition: Type.h:4082
QualType getReturnType() const
Definition: Type.h:4116
@ SME_PStateSMEnabledMask
Definition: Type.h:4060
@ SME_PStateSMCompatibleMask
Definition: Type.h:4061
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:105
const Decl * getDecl() const
Definition: GlobalDecl.h:103
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2499
ConstructorUsingShadowDecl * getShadowDecl() const
Definition: DeclCXX.h:2511
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Definition: LangOptions.h:266
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that is accepted.
Definition: LangOptions.h:418
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
Definition: LangOptions.h:485
FPExceptionModeKind getDefaultExceptionMode() const
Definition: LangOptions.h:710
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* option has been specified?
Definition: LangOptions.cpp:49
bool assumeFunctionsAreConvergent() const
Definition: LangOptions.h:599
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition: Type.h:3674
Describes a module or submodule.
Definition: Module.h:105
StringRef getName() const
Get the name of the identifier for this declaration as a StringRef.
Definition: Decl.h:276
ObjCCategoryDecl - Represents a category declaration.
Definition: DeclObjC.h:2323
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore in ARC.
Definition: ExprObjC.h:1575
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Definition: ExprObjC.h:1603
Represents an ObjC class declaration.
Definition: DeclObjC.h:1150
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:140
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:418
ArrayRef< ParmVarDecl * > parameters() const
Definition: DeclObjC.h:373
bool isVariadic() const
Definition: DeclObjC.h:431
bool isDirectMethod() const
True if the method is tagged as objc_direct.
Definition: DeclObjC.cpp:871
QualType getReturnType() const
Definition: DeclObjC.h:329
Represents a parameter to a function.
Definition: Decl.h:1749
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2898
QualType getPointeeType() const
Definition: Type.h:2908
A (possibly-)qualified type.
Definition: Type.h:737
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
Definition: Type.h:6979
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition: Type.cpp:2665
@ DK_cxx_destructor
Definition: Type.h:1313
@ DK_nontrivial_c_struct
Definition: Type.h:1316
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:7027
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:6942
QualType getCanonicalType() const
Definition: Type.h:6954
bool isConstQualified() const
Determine whether this type is const-qualified.
Definition: Type.h:6974
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: Type.h:1323
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:175
LangAS getAddressSpace() const
Definition: Type.h:378
Represents a struct/union/class.
Definition: Decl.h:4133
bool hasFlexibleArrayMember() const
Definition: Decl.h:4166
field_iterator field_end() const
Definition: Decl.h:4342
field_range fields() const
Definition: Decl.h:4339
bool isParamDestroyedInCallee() const
Definition: Decl.h:4275
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition: Decl.h:4324
field_iterator field_begin() const
Definition: Decl.cpp:5035
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5092
RecordDecl * getDecl() const
Definition: Type.h:5102
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:3009
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool isUnion() const
Definition: Decl.h:3755
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change,...
Definition: TargetCXXABI.h:188
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
Definition: TargetCXXABI.h:136
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1220
bool useObjCFPRetForRealType(FloatModeKind T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on this target.
Definition: TargetInfo.h:942
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1291
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on this target.
Definition: TargetInfo.h:948
Options for controlling the target.
Definition: TargetOptions.h:26
std::vector< std::string > Features
The list of target specific features to enable or disable; this should be a list of strings starting with '+' or '-'.
Definition: TargetOptions.h:58
std::string TuneCPU
If given, the name of the target CPU to tune code for.
Definition: TargetOptions.h:39
std::string CPU
If given, the name of the target CPU to generate code for.
Definition: TargetOptions.h:36
The base class of the type hierarchy.
Definition: Type.h:1606
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or because it is the injected-class-name type of a class template or class template partial specialization.
Definition: Type.cpp:1819
bool isBlockPointerType() const
Definition: Type.h:7162
bool isVoidType() const
Definition: Type.h:7443
bool isIncompleteArrayType() const
Definition: Type.h:7228
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:2289
bool isPointerType() const
Definition: Type.h:7154
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:7479
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:7724
bool isReferenceType() const
Definition: Type.h:7166
bool isScalarType() const
Definition: Type.h:7538
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:651
bool isBitIntType() const
Definition: Type.h:7378
QualType getCanonicalTypeInternal() const
Definition: Type.h:2703
bool isMemberPointerType() const
Definition: Type.h:7202
bool isObjectType() const
Determine whether this type is an object type.
Definition: Type.h:2175
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types, and incomplete types.
Definition: Type.cpp:2299
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
Definition: Type.cpp:2195
bool isAnyPointerType() const
Definition: Type.h:7158
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:7657
bool isNullPtrType() const
Definition: Type.h:7472
bool isObjCRetainableType() const
Definition: Type.cpp:4758
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.cpp:1823
UnaryOperator - This represents the unary-expression's (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:2182
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4661
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:4682
const Expr * getSubExpr() const
Definition: Expr.h:4677
QualType getType() const
Definition: Decl.h:717
Represents a variable declaration or definition.
Definition: Decl.h:918
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Definition: Decl.cpp:2820
Represents a GCC generic vector type.
Definition: Type.h:3512
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Definition: SPIR.cpp:112
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the end of the scope, return, goto, etc.).
Definition: EHScopeStack.h:84
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw statement leaving it).
Definition: EHScopeStack.h:80
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves.
Definition: CGCall.cpp:2064
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
Definition: Format.cpp:3773
bool Ret(InterpState &S, CodePtr &PC, APValue &Result)
Definition: Interp.h:217
bool This(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1834
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1809
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1369
The JSON file list parser is used to communicate input to InstallAPI.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
CXXCtorType
C++ constructor types.
Definition: ABI.h:24
@ Ctor_DefaultClosure
Default closure variant of a ctor.
Definition: ABI.h:29
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
Definition: ABI.h:28
@ Ctor_Complete
Complete object ctor.
Definition: ABI.h:25
@ OpenCL
Definition: LangStandard.h:64
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
Definition: Specifiers.h:148
@ Result
The result type of a method or function.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI treatment.
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI treatment.
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
@ CanPassInRegs
The argument of this type can be passed directly in registers.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:275
@ CC_X86Pascal
Definition: Specifiers.h:281
@ CC_Swift
Definition: Specifiers.h:290
@ CC_IntelOclBicc
Definition: Specifiers.h:287
@ CC_OpenCLKernel
Definition: Specifiers.h:289
@ CC_PreserveMost
Definition: Specifiers.h:292
@ CC_Win64
Definition: Specifiers.h:282
@ CC_X86ThisCall
Definition: Specifiers.h:279
@ CC_AArch64VectorCall
Definition: Specifiers.h:294
@ CC_AAPCS
Definition: Specifiers.h:285
@ CC_PreserveNone
Definition: Specifiers.h:298
@ CC_C
Definition: Specifiers.h:276
@ CC_AMDGPUKernelCall
Definition: Specifiers.h:296
@ CC_M68kRTD
Definition: Specifiers.h:297
@ CC_SwiftAsync
Definition: Specifiers.h:291
@ CC_X86RegCall
Definition: Specifiers.h:284
@ CC_X86VectorCall
Definition: Specifiers.h:280
@ CC_SpirFunction
Definition: Specifiers.h:288
@ CC_AArch64SVEPCS
Definition: Specifiers.h:295
@ CC_X86StdCall
Definition: Specifiers.h:277
@ CC_X86_64SysV
Definition: Specifiers.h:283
@ CC_PreserveAll
Definition: Specifiers.h:293
@ CC_X86FastCall
Definition: Specifiers.h:278
@ CC_AAPCS_VFP
Definition: Specifiers.h:286
unsigned long uint64_t
Definition: Format.h:5304
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Definition: complex_cmath.h:40
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the LLVM struct type.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
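A worked example of what Offset/Size/StorageSize describe, with made-up numbers ('unsigned x : 5' placed 3 bits into a 32-bit storage unit): the loaded storage word is shifted right by Offset and masked to Size bits. Plain C++, independent of clang; extractUnsignedBitField is hypothetical:

#include <cstdint>

static uint32_t extractUnsignedBitField(uint32_t StorageWord,
                                        unsigned Offset, unsigned Size) {
  // E.g. Offset = 3, Size = 5, StorageSize = 32: (Word >> 3) & 0x1F.
  uint32_t Mask = Size >= 32 ? ~0u : ((1u << Size) - 1u);
  return (StorageWord >> Offset) & Mask;
}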
Similar to AddedStructorArgs, but only notes the number of additional arguments.
Definition: CGCXXABI.h:355
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:271
LValue Source
The original argument.
Definition: CGCall.h:265
Address Temporary
The temporary alloca.
Definition: CGCall.h:268
LValue getKnownLValue() const
Definition: CGCall.h:238
RValue getKnownRValue() const
Definition: CGCall.h:242
void copyInto(CodeGenFunction &CGF, Address A) const
Definition: CGCall.cpp:4632
bool hasLValue() const
Definition: CGCall.h:231
RValue getRValue(CodeGenFunction &CGF) const
Definition: CGCall.cpp:4622
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:640
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:159
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:165
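A sketch of the gating idiom these predicates support, as in the EmitNonNullArgCheck entry above; the particular sanitizer masks are illustrative, not a statement of what this file checks (wantsNullChecks is hypothetical):

#include "clang/Basic/Sanitizers.h"

static bool wantsNullChecks(const clang::SanitizerSet &SanOpts) {
  // True when either the null or the nonnull-attribute sanitizer is on.
  return SanOpts.hasOneOf(clang::SanitizerKind::Null |
                          clang::SanitizerKind::NonnullAttribute);
}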
Iterator for iterating over Stmt * arrays that contain only T *.
Definition: Stmt.h:1314