//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}
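
// For example, a function declared as
//   void f(void) __attribute__((fastcall));
// carries CC_X86FastCall in its type, which the switch above lowers to
// llvm::CallingConv::X86_FastCall, printed in IR as:
//   define x86_fastcallcc void @f()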

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}
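
// For example, with one prefix argument (say, an implicit 'this'), a
// two-parameter prototype, and one trailing variadic argument, the resulting
// infos are laid out as:
//   [ default | proto param 0 | proto param 1 | default ]
// where the middle entries come from proto->getExtParameterInfos() and the
// rest are value-initialized.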

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}
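
// For example, a parameter declared as
//   void f(char *buf __attribute__((pass_object_size(0))));
// contributes two entries to 'prefix': 'char *' itself followed by a size_t
// for the implicit object-size argument, with a default ExtParameterInfo
// filled in for the latter by addExtParameterInfosForCall above.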

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}
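
// For example, on 32-bit Windows a declaration such as
//   void __stdcall f(int);
// is mapped by the attribute checks above to CC_X86StdCall, which
// ClangCallConvToLLVMCallConv in turn lowers to
// llvm::CallingConv::X86_StdCall.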

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null value of RD means we don't have any meaningful "this" argument
/// type, so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(MD, Type, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo());
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo());
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}
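
// As an illustration of the 'this'-return logic above: under ABIs where
// constructors return 'this' (e.g. the ARM C++ ABI), a complete constructor
// for 'struct S { S(); };' is arranged with a result type equal to the
// 'this' argument type instead of void.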

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}
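
// For example, a call to the variadic prototype
//   int printf(const char *, ...);
// with three actual arguments yields RequiredArgs(1): only the format string
// is required, and the remaining two arguments are lowered as variadic.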

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                              ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}
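
// Because of the FoldingSet lookup above, function infos are uniqued, so
// arranging the same signature twice yields the same node. Given some
// CodeGenTypes &CGT:
//   const CGFunctionInfo &A = CGT.arrangeNullaryFunction();
//   const CGFunctionInfo &B = CGT.arrangeNullaryFunction();
//   assert(&A == &B && "function infos are uniqued");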

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
    operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
                     argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}
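
// The allocation above follows the llvm::TrailingObjects idiom: one buffer
// holds the CGFunctionInfo header, then argTypes.size() + 1 ArgInfo slots
// (slot 0 holds the return type), then the optional ExtParameterInfo array.
// getArgsBuffer() and getExtParameterInfosBuffer() index into that
// co-allocated storage.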

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}
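
// For example, given
//   struct Pt { int x; int y; };
// an argument of type 'Pt[2]' expands to a TEK_ConstantArray of a TEK_Record
// and ultimately to four scalar IR arguments; getExpansionSize below computes
// exactly that count.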

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
    BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
      CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
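
// For example, coercing an i64 holding 0xAABBCCDD11223344 to i32 yields
// 0x11223344 on a little-endian target but 0xAABBCCDD on a big-endian one,
// exactly what storing the i64 and loading an i32 from the same address
// would produce.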

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.AllocaInt8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
      CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}
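
// For example, storing a first-class aggregate value of type {i32, float}
// becomes two scalar stores at the element offsets from the struct layout
// instead of a single aggregate store, which fast-isel handles poorly.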

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.AllocaInt8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                             CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace
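
// For example, for a function whose return value is sret-indirect and whose
// single Clang argument is expanded into two IR arguments, the mapping is:
//   IR arg 0:    sret pointer             (getSRetArgNo() == 0)
//   IR args 1-2: expansion of Clang arg 0 (getIRArgs(0) == {1, 2})
// and totalIRArgs() == 3.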
1497 
1498 /***/
1499 
1501  const auto &RI = FI.getReturnInfo();
1502  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1503 }
1504 
1506  return ReturnTypeUsesSRet(FI) &&
1507  getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1508 }
1509 
1511  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1512  switch (BT->getKind()) {
1513  default:
1514  return false;
1515  case BuiltinType::Float:
1517  case BuiltinType::Double:
1519  case BuiltinType::LongDouble:
1521  }
1522  }
1523 
1524  return false;
1525 }
1526 
1528  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1529  if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1530  if (BT->getKind() == BuiltinType::LongDouble)
1532  }
1533  }
1534 
1535  return false;
1536 }
1537 
1539  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1540  return GetFunctionType(FI);
1541 }
1542 
1543 llvm::FunctionType *
1545 
1546  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1547  (void)Inserted;
1548  assert(Inserted && "Recursively being processed?");
1549 
1550  llvm::Type *resultType = nullptr;
1551  const ABIArgInfo &retAI = FI.getReturnInfo();
1552  switch (retAI.getKind()) {
1553  case ABIArgInfo::Expand:
1554  llvm_unreachable("Invalid ABI kind for return argument");
1555 
1556  case ABIArgInfo::Extend:
1557  case ABIArgInfo::Direct:
1558  resultType = retAI.getCoerceToType();
1559  break;
1560 
1561  case ABIArgInfo::InAlloca:
1562  if (retAI.getInAllocaSRet()) {
1563  // sret things on win32 aren't void, they return the sret pointer.
1564  QualType ret = FI.getReturnType();
1565  llvm::Type *ty = ConvertType(ret);
1566  unsigned addressSpace = Context.getTargetAddressSpace(ret);
1567  resultType = llvm::PointerType::get(ty, addressSpace);
1568  } else {
1569  resultType = llvm::Type::getVoidTy(getLLVMContext());
1570  }
1571  break;
1572 
1573  case ABIArgInfo::Indirect:
1574  case ABIArgInfo::Ignore:
1575  resultType = llvm::Type::getVoidTy(getLLVMContext());
1576  break;
1577 
1579  resultType = retAI.getUnpaddedCoerceAndExpandType();
1580  break;
1581  }
1582 
1583  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1584  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1585 
1586  // Add type for sret argument.
1587  if (IRFunctionArgs.hasSRetArg()) {
1588  QualType Ret = FI.getReturnType();
1589  llvm::Type *Ty = ConvertType(Ret);
1590  unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1591  ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1592  llvm::PointerType::get(Ty, AddressSpace);
1593  }
1594 
1595  // Add type for inalloca argument.
1596  if (IRFunctionArgs.hasInallocaArg()) {
1597  auto ArgStruct = FI.getArgStruct();
1598  assert(ArgStruct);
1599  ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1600  }
1601 
1602  // Add in all of the required arguments.
1603  unsigned ArgNo = 0;
1605  ie = it + FI.getNumRequiredArgs();
1606  for (; it != ie; ++it, ++ArgNo) {
1607  const ABIArgInfo &ArgInfo = it->info;
1608 
1609  // Insert a padding type to ensure proper alignment.
1610  if (IRFunctionArgs.hasPaddingArg(ArgNo))
1611  ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1612  ArgInfo.getPaddingType();
1613 
1614  unsigned FirstIRArg, NumIRArgs;
1615  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1616 
1617  switch (ArgInfo.getKind()) {
1618  case ABIArgInfo::Ignore:
1619  case ABIArgInfo::InAlloca:
1620  assert(NumIRArgs == 0);
1621  break;
1622 
1623  case ABIArgInfo::Indirect: {
1624  assert(NumIRArgs == 1);
1625  // indirect arguments are always on the stack, which is alloca addr space.
1626  llvm::Type *LTy = ConvertTypeForMem(it->type);
1627  ArgTypes[FirstIRArg] = LTy->getPointerTo(
1628  CGM.getDataLayout().getAllocaAddrSpace());
1629  break;
1630  }
1631 
1632  case ABIArgInfo::Extend:
1633  case ABIArgInfo::Direct: {
1634  // Fast-isel and the optimizer generally like scalar values better than
1635  // FCAs, so we flatten them if this is safe to do for this argument.
1636  llvm::Type *argType = ArgInfo.getCoerceToType();
1637  llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1638  if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1639  assert(NumIRArgs == st->getNumElements());
1640  for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1641  ArgTypes[FirstIRArg + i] = st->getElementType(i);
1642  } else {
1643  assert(NumIRArgs == 1);
1644  ArgTypes[FirstIRArg] = argType;
1645  }
1646  break;
1647  }
1648 
1650  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1651  for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1652  *ArgTypesIter++ = EltTy;
1653  }
1654  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1655  break;
1656  }
1657 
1658  case ABIArgInfo::Expand:
1659  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1660  getExpandedTypes(it->type, ArgTypesIter);
1661  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1662  break;
1663  }
1664  }
1665 
1666  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1667  assert(Erased && "Not in set?");
1668 
1669  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1670 }
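// A sketch of the resulting lowering (illustrative, assuming the x86-64
// SysV ABI; the types come from the ABIArgInfo decisions made earlier):
//
//   struct Big { long a, b, c; };          // classified indirect
//   struct Big make(int x, double y);
//     -> void (%struct.Big*, i32, double)  ; sret pointer prepended
//
//   struct Small { int a, b; };            // classified direct, coerced
//   struct Small pair(void);
//     -> i64 ()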
1671 
1672 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1673  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1674  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1675 
1676  if (!isFuncTypeConvertible(FPT))
1677  return llvm::StructType::get(getLLVMContext());
1678 
1679  const CGFunctionInfo *Info;
1680  if (isa<CXXDestructorDecl>(MD))
1681  Info =
1682  &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1683  else
1684  Info = &arrangeCXXMethodDeclaration(MD);
1685  return GetFunctionType(*Info);
1686 }
1687 
1688 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1689  llvm::AttrBuilder &FuncAttrs,
1690  const FunctionProtoType *FPT) {
1691  if (!FPT)
1692  return;
1693 
1694  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1695  FPT->isNothrow())
1696  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1697 }
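// For example (a sketch, not from this file): given
//
//   void f() noexcept;
//
// the prototype is nothrow, so the helper above adds NoUnwind and the
// declaration lowers to roughly
//
//   declare void @_Z1fv() nounwind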
1698 
1699 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1700  bool AttrOnCallSite,
1701  llvm::AttrBuilder &FuncAttrs) {
1702  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1703  if (!HasOptnone) {
1704  if (CodeGenOpts.OptimizeSize)
1705  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1706  if (CodeGenOpts.OptimizeSize == 2)
1707  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1708  }
1709 
1710  if (CodeGenOpts.DisableRedZone)
1711  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1712  if (CodeGenOpts.IndirectTlsSegRefs)
1713  FuncAttrs.addAttribute("indirect-tls-seg-refs");
1714  if (CodeGenOpts.NoImplicitFloat)
1715  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1716 
1717  if (AttrOnCallSite) {
1718  // Attributes that should go on the call site only.
1719  if (!CodeGenOpts.SimplifyLibCalls ||
1720  CodeGenOpts.isNoBuiltinFunc(Name.data()))
1721  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1722  if (!CodeGenOpts.TrapFuncName.empty())
1723  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1724  } else {
1725  // Attributes that should go on the function, but not the call site.
1726  if (!CodeGenOpts.DisableFPElim) {
1727  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1728  } else if (CodeGenOpts.OmitLeafFramePointer) {
1729  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1730  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1731  } else {
1732  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1733  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1734  }
1735 
1736  FuncAttrs.addAttribute("less-precise-fpmad",
1737  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1738 
1739  if (CodeGenOpts.NullPointerIsValid)
1740  FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1741  if (!CodeGenOpts.FPDenormalMode.empty())
1742  FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1743 
1744  FuncAttrs.addAttribute("no-trapping-math",
1745  llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1746 
1747  // Strict (compliant) code is the default, so only add this attribute to
1748  // indicate that we are trying to work around a problem case.
1749  if (!CodeGenOpts.StrictFloatCastOverflow)
1750  FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1751 
1752  // TODO: Are these all needed?
1753  // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1754  FuncAttrs.addAttribute("no-infs-fp-math",
1755  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1756  FuncAttrs.addAttribute("no-nans-fp-math",
1757  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1758  FuncAttrs.addAttribute("unsafe-fp-math",
1759  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1760  FuncAttrs.addAttribute("use-soft-float",
1761  llvm::toStringRef(CodeGenOpts.SoftFloat));
1762  FuncAttrs.addAttribute("stack-protector-buffer-size",
1763  llvm::utostr(CodeGenOpts.SSPBufferSize));
1764  FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1765  llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1766  FuncAttrs.addAttribute(
1767  "correctly-rounded-divide-sqrt-fp-math",
1768  llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1769 
1770  if (getLangOpts().OpenCL)
1771  FuncAttrs.addAttribute("denorms-are-zero",
1772  llvm::toStringRef(CodeGenOpts.FlushDenorm));
1773 
1774  // TODO: Reciprocal estimate codegen options should apply to instructions?
1775  const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1776  if (!Recips.empty())
1777  FuncAttrs.addAttribute("reciprocal-estimates",
1778  llvm::join(Recips, ","));
1779 
1780  if (!CodeGenOpts.PreferVectorWidth.empty() &&
1781  CodeGenOpts.PreferVectorWidth != "none")
1782  FuncAttrs.addAttribute("prefer-vector-width",
1783  CodeGenOpts.PreferVectorWidth);
1784 
1785  if (CodeGenOpts.StackRealignment)
1786  FuncAttrs.addAttribute("stackrealign");
1787  if (CodeGenOpts.Backchain)
1788  FuncAttrs.addAttribute("backchain");
1789 
1790  if (CodeGenOpts.SpeculativeLoadHardening)
1791  FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1792  }
1793 
1794  if (getLangOpts().assumeFunctionsAreConvergent()) {
1795  // Conservatively, mark all functions and calls in CUDA and OpenCL as
1796  // convergent (meaning, they may call an intrinsically convergent op, such
1797  // as __syncthreads() / barrier(), and so can't have certain optimizations
1798  // applied around them). LLVM will remove this attribute where it safely
1799  // can.
1800  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1801  }
1802 
1803  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1804  // Exceptions aren't supported in CUDA device code.
1805  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1806 
1807  // Respect -fcuda-flush-denormals-to-zero.
1808  if (CodeGenOpts.FlushDenorm)
1809  FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1810  }
1811 }
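// Illustrative result (assumed flags, not exhaustive): compiling with -Oz
// and -fno-omit-frame-pointer, the builder above would hold roughly
//
//   optsize minsize
//   "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"
//   "no-trapping-math"="false" "stack-protector-buffer-size"="8"
//
// i.e. a mix of enum attributes and string key/value pairs that later land
// in the function's LLVM attribute group.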
1812 
1813 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1814  llvm::AttrBuilder FuncAttrs;
1815  ConstructDefaultFnAttrList(F.getName(),
1816  F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1817  /* AttrOnCallsite = */ false, FuncAttrs);
1818  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1819 }
1820 
1821 void CodeGenModule::ConstructAttributeList(
1822  StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1823  llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1824  llvm::AttrBuilder FuncAttrs;
1825  llvm::AttrBuilder RetAttrs;
1826 
1827  CallingConv = FI.getEffectiveCallingConvention();
1828  if (FI.isNoReturn())
1829  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1830 
1831  // If we have information about the function prototype, we can learn
1832  // attributes from there.
1833  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1834  CalleeInfo.getCalleeFunctionProtoType());
1835 
1836  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1837 
1838  bool HasOptnone = false;
1839  // FIXME: handle sseregparm someday...
1840  if (TargetDecl) {
1841  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1842  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1843  if (TargetDecl->hasAttr<NoThrowAttr>())
1844  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1845  if (TargetDecl->hasAttr<NoReturnAttr>())
1846  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1847  if (TargetDecl->hasAttr<ColdAttr>())
1848  FuncAttrs.addAttribute(llvm::Attribute::Cold);
1849  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1850  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1851  if (TargetDecl->hasAttr<ConvergentAttr>())
1852  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1853 
1854  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1855  AddAttributesFromFunctionProtoType(
1856  getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1857  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1858  // These attributes are not inherited by overloads.
1859  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1860  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1861  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1862  }
1863 
1864  // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1865  if (TargetDecl->hasAttr<ConstAttr>()) {
1866  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1867  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1868  } else if (TargetDecl->hasAttr<PureAttr>()) {
1869  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1870  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1871  } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1872  FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1873  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1874  }
1875  if (TargetDecl->hasAttr<RestrictAttr>())
1876  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1877  if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1878  !CodeGenOpts.NullPointerIsValid)
1879  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1880  if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1881  FuncAttrs.addAttribute("no_caller_saved_registers");
1882  if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1883  FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1884 
1885  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1886  if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1887  Optional<unsigned> NumElemsParam;
1888  if (AllocSize->getNumElemsParam().isValid())
1889  NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1890  FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1891  NumElemsParam);
1892  }
1893  }
1894 
1895  ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1896 
1897  if (CodeGenOpts.EnableSegmentedStacks &&
1898  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1899  FuncAttrs.addAttribute("split-stack");
1900 
1901  // Add NonLazyBind attribute to function declarations when -fno-plt
1902  // is used.
1903  if (TargetDecl && CodeGenOpts.NoPLT) {
1904  if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1905  if (!Fn->isDefined() && !AttrOnCallSite) {
1906  FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1907  }
1908  }
1909  }
1910 
1911  if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1912  if (getLangOpts().OpenCLVersion <= 120) {
1913  // In OpenCL v1.2, work groups are always uniform.
1914  FuncAttrs.addAttribute("uniform-work-group-size", "true");
1915  } else {
1916  // In OpenCL v2.0, work groups may or may not be uniform. The
1917  // '-cl-uniform-work-group-size' compile option gives the compiler
1918  // a hint that the global work-size is a multiple of the
1919  // work-group size specified to clEnqueueNDRangeKernel
1920  // (i.e. work groups are uniform).
1921  FuncAttrs.addAttribute("uniform-work-group-size",
1922  llvm::toStringRef(CodeGenOpts.UniformWGSize));
1923  }
1924  }
1925 
1926  if (!AttrOnCallSite) {
1927  bool DisableTailCalls = false;
1928 
1929  if (CodeGenOpts.DisableTailCalls)
1930  DisableTailCalls = true;
1931  else if (TargetDecl) {
1932  if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1933  TargetDecl->hasAttr<AnyX86InterruptAttr>())
1934  DisableTailCalls = true;
1935  else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1936  if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1937  if (!BD->doesNotEscape())
1938  DisableTailCalls = true;
1939  }
1940  }
1941 
1942  FuncAttrs.addAttribute("disable-tail-calls",
1943  llvm::toStringRef(DisableTailCalls));
1944  GetCPUAndFeaturesAttributes(TargetDecl, FuncAttrs);
1945  }
1946 
1947  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1948 
1949  QualType RetTy = FI.getReturnType();
1950  const ABIArgInfo &RetAI = FI.getReturnInfo();
1951  switch (RetAI.getKind()) {
1952  case ABIArgInfo::Extend:
1953  if (RetAI.isSignExt())
1954  RetAttrs.addAttribute(llvm::Attribute::SExt);
1955  else
1956  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1957  LLVM_FALLTHROUGH;
1958  case ABIArgInfo::Direct:
1959  if (RetAI.getInReg())
1960  RetAttrs.addAttribute(llvm::Attribute::InReg);
1961  break;
1962  case ABIArgInfo::Ignore:
1963  break;
1964 
1965  case ABIArgInfo::InAlloca:
1966  case ABIArgInfo::Indirect: {
1967  // inalloca and sret disable readnone and readonly
1968  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1969  .removeAttribute(llvm::Attribute::ReadNone);
1970  break;
1971  }
1972 
1973  case ABIArgInfo::CoerceAndExpand:
1974  break;
1975 
1976  case ABIArgInfo::Expand:
1977  llvm_unreachable("Invalid ABI kind for return argument");
1978  }
1979 
1980  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1981  QualType PTy = RefTy->getPointeeType();
1982  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1983  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1984  .getQuantity());
1985  else if (getContext().getTargetAddressSpace(PTy) == 0 &&
1986  !CodeGenOpts.NullPointerIsValid)
1987  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1988  }
1989 
1990  bool hasUsedSRet = false;
1991  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
1992 
1993  // Attach attributes to sret.
1994  if (IRFunctionArgs.hasSRetArg()) {
1995  llvm::AttrBuilder SRETAttrs;
1996  if (!RetAI.getSuppressSRet())
1997  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1998  hasUsedSRet = true;
1999  if (RetAI.getInReg())
2000  SRETAttrs.addAttribute(llvm::Attribute::InReg);
2001  ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2002  llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2003  }
2004 
2005  // Attach attributes to inalloca argument.
2006  if (IRFunctionArgs.hasInallocaArg()) {
2007  llvm::AttrBuilder Attrs;
2008  Attrs.addAttribute(llvm::Attribute::InAlloca);
2009  ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2010  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2011  }
2012 
2013  unsigned ArgNo = 0;
2014  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2015  E = FI.arg_end();
2016  I != E; ++I, ++ArgNo) {
2017  QualType ParamType = I->type;
2018  const ABIArgInfo &AI = I->info;
2019  llvm::AttrBuilder Attrs;
2020 
2021  // Add attribute for padding argument, if necessary.
2022  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2023  if (AI.getPaddingInReg()) {
2024  ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2025  llvm::AttributeSet::get(
2026  getLLVMContext(),
2027  llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2028  }
2029  }
2030 
2031  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2032  // have the corresponding parameter variable. It doesn't make
2033  // sense to do it here because parameters are so messed up.
2034  switch (AI.getKind()) {
2035  case ABIArgInfo::Extend:
2036  if (AI.isSignExt())
2037  Attrs.addAttribute(llvm::Attribute::SExt);
2038  else
2039  Attrs.addAttribute(llvm::Attribute::ZExt);
2040  LLVM_FALLTHROUGH;
2041  case ABIArgInfo::Direct:
2042  if (ArgNo == 0 && FI.isChainCall())
2043  Attrs.addAttribute(llvm::Attribute::Nest);
2044  else if (AI.getInReg())
2045  Attrs.addAttribute(llvm::Attribute::InReg);
2046  break;
2047 
2048  case ABIArgInfo::Indirect: {
2049  if (AI.getInReg())
2050  Attrs.addAttribute(llvm::Attribute::InReg);
2051 
2052  if (AI.getIndirectByVal())
2053  Attrs.addAttribute(llvm::Attribute::ByVal);
2054 
2055  CharUnits Align = AI.getIndirectAlign();
2056 
2057  // In a byval argument, it is important that the required
2058  // alignment of the type is honored, as LLVM might be creating a
2059  // *new* stack object, and needs to know what alignment to give
2060  // it. (Sometimes it can deduce a sensible alignment on its own,
2061  // but not if clang decides it must emit a packed struct, or the
2062  // user specifies increased alignment requirements.)
2063  //
2064  // This is different from indirect *not* byval, where the object
2065  // exists already, and the align attribute is purely
2066  // informative.
2067  assert(!Align.isZero());
2068 
2069  // For now, only add this when we have a byval argument.
2070  // TODO: be less lazy about updating test cases.
2071  if (AI.getIndirectByVal())
2072  Attrs.addAlignmentAttr(Align.getQuantity());
2073 
2074  // byval disables readnone and readonly.
2075  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2076  .removeAttribute(llvm::Attribute::ReadNone);
2077  break;
2078  }
2079  case ABIArgInfo::Ignore:
2080  case ABIArgInfo::Expand:
2081  case ABIArgInfo::CoerceAndExpand:
2082  break;
2083 
2084  case ABIArgInfo::InAlloca:
2085  // inalloca disables readnone and readonly.
2086  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2087  .removeAttribute(llvm::Attribute::ReadNone);
2088  continue;
2089  }
2090 
2091  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2092  QualType PTy = RefTy->getPointeeType();
2093  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2094  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2095  .getQuantity());
2096  else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2097  !CodeGenOpts.NullPointerIsValid)
2098  Attrs.addAttribute(llvm::Attribute::NonNull);
2099  }
2100 
2101  switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2102  case ParameterABI::Ordinary:
2103  break;
2104 
2105  case ParameterABI::SwiftIndirectResult: {
2106  // Add 'sret' if we haven't already used it for something, but
2107  // only if the result is void.
2108  if (!hasUsedSRet && RetTy->isVoidType()) {
2109  Attrs.addAttribute(llvm::Attribute::StructRet);
2110  hasUsedSRet = true;
2111  }
2112 
2113  // Add 'noalias' in either case.
2114  Attrs.addAttribute(llvm::Attribute::NoAlias);
2115 
2116  // Add 'dereferenceable' and 'alignment'.
2117  auto PTy = ParamType->getPointeeType();
2118  if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2119  auto info = getContext().getTypeInfoInChars(PTy);
2120  Attrs.addDereferenceableAttr(info.first.getQuantity());
2121  Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2122  info.second.getQuantity()));
2123  }
2124  break;
2125  }
2126 
2127  case ParameterABI::SwiftErrorResult:
2128  Attrs.addAttribute(llvm::Attribute::SwiftError);
2129  break;
2130 
2131  case ParameterABI::SwiftContext:
2132  Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2133  break;
2134  }
2135 
2136  if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2137  Attrs.addAttribute(llvm::Attribute::NoCapture);
2138 
2139  if (Attrs.hasAttributes()) {
2140  unsigned FirstIRArg, NumIRArgs;
2141  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2142  for (unsigned i = 0; i < NumIRArgs; i++)
2143  ArgAttrs[FirstIRArg + i] =
2144  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2145  }
2146  }
2147  assert(ArgNo == FI.arg_size());
2148 
2149  AttrList = llvm::AttributeList::get(
2150  getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2151  llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2152 }
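// Putting the pieces together (an illustrative sketch): for
//
//   __attribute__((const)) int sq(int x);
//
// the list assembled above comes out along the lines of
//
//   declare i32 @sq(i32) #0
//   attributes #0 = { nounwind readnone ... }
//
// with function attributes at FunctionIndex, return attributes (e.g. zext,
// noalias) at ReturnIndex, and one AttributeSet per IR argument.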
2153 
2154 /// An argument came in as a promoted argument; demote it back to its
2155 /// declared type.
2156 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2157  const VarDecl *var,
2158  llvm::Value *value) {
2159  llvm::Type *varType = CGF.ConvertType(var->getType());
2160 
2161  // This can happen with promotions that actually don't change the
2162  // underlying type, like the enum promotions.
2163  if (value->getType() == varType) return value;
2164 
2165  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2166  && "unexpected promotion type");
2167 
2168  if (isa<llvm::IntegerType>(varType))
2169  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2170 
2171  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2172 }
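// Example of the demotion above (illustrative): for the K&R-style definition
//
//   void f(c) char c; { ... }
//
// the ABI promotes 'c' to int, so the IR argument arrives as an i32 and is
// truncated back to i8 here ("arg.unpromote"); a float promoted to double
// would instead be narrowed back with an FP cast.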
2173 
2174 /// Returns the attribute (either parameter attribute, or function
2175 /// attribute), which declares argument ArgNo to be non-null.
2176 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2177  QualType ArgType, unsigned ArgNo) {
2178  // FIXME: __attribute__((nonnull)) can also be applied to:
2179  // - references to pointers, where the pointee is known to be
2180  // nonnull (apparently a Clang extension)
2181  // - transparent unions containing pointers
2182  // In the former case, LLVM IR cannot represent the constraint. In
2183  // the latter case, we have no guarantee that the transparent union
2184  // is in fact passed as a pointer.
2185  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2186  return nullptr;
2187  // First, check attribute on parameter itself.
2188  if (PVD) {
2189  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2190  return ParmNNAttr;
2191  }
2192  // Check function attributes.
2193  if (!FD)
2194  return nullptr;
2195  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2196  if (NNAttr->isNonNull(ArgNo))
2197  return NNAttr;
2198  }
2199  return nullptr;
2200 }
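// For illustration, either spelling below yields a NonNullAttr for the
// pointer parameter of a hypothetical function 'use':
//
//   void use(void *p) __attribute__((nonnull(1)));   // function attribute
//   void use(__attribute__((nonnull)) void *p);      // parameter attribute
//
// (_Nonnull is a nullability qualifier and is handled separately.)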
2201 
2202 namespace {
2203  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2204  Address Temp;
2205  Address Arg;
2206  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2207  void Emit(CodeGenFunction &CGF, Flags flags) override {
2208  llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2209  CGF.Builder.CreateStore(errorValue, Arg);
2210  }
2211  };
2212 }
2213 
2214 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2215  llvm::Function *Fn,
2216  const FunctionArgList &Args) {
2217  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2218  // Naked functions don't have prologues.
2219  return;
2220 
2221  // If this is an implicit-return-zero function, go ahead and
2222  // initialize the return value. TODO: it might be nice to have
2223  // a more general mechanism for this that didn't require synthesized
2224  // return statements.
2225  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2226  if (FD->hasImplicitReturnZero()) {
2227  QualType RetTy = FD->getReturnType().getUnqualifiedType();
2228  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2229  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2230  Builder.CreateStore(Zero, ReturnValue);
2231  }
2232  }
2233 
2234  // FIXME: We no longer need the types from FunctionArgList; lift up and
2235  // simplify.
2236 
2237  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2238  // Flattened function arguments.
2239  SmallVector<llvm::Value *, 16> FnArgs;
2240  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2241  for (auto &Arg : Fn->args()) {
2242  FnArgs.push_back(&Arg);
2243  }
2244  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2245 
2246  // If we're using inalloca, all the memory arguments are GEPs off of the last
2247  // parameter, which is a pointer to the complete memory area.
2248  Address ArgStruct = Address::invalid();
2249  const llvm::StructLayout *ArgStructLayout = nullptr;
2250  if (IRFunctionArgs.hasInallocaArg()) {
2251  ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2252  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2253  FI.getArgStructAlignment());
2254 
2255  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2256  }
2257 
2258  // Name the struct return parameter.
2259  if (IRFunctionArgs.hasSRetArg()) {
2260  auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2261  AI->setName("agg.result");
2262  AI->addAttr(llvm::Attribute::NoAlias);
2263  }
2264 
2265  // Track if we received the parameter as a pointer (indirect, byval, or
2266  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2267  // into a local alloca for us.
2268  SmallVector<ParamValue, 16> ArgVals;
2269  ArgVals.reserve(Args.size());
2270 
2271  // Create a pointer value for every parameter declaration. This usually
2272  // entails copying one or more LLVM IR arguments into an alloca. Don't push
2273  // any cleanups or do anything that might unwind. We do that separately, so
2274  // we can push the cleanups in the correct order for the ABI.
2275  assert(FI.arg_size() == Args.size() &&
2276  "Mismatch between function signature & arguments.");
2277  unsigned ArgNo = 0;
2278  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2279  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2280  i != e; ++i, ++info_it, ++ArgNo) {
2281  const VarDecl *Arg = *i;
2282  const ABIArgInfo &ArgI = info_it->info;
2283 
2284  bool isPromoted =
2285  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2286  // We are converting from ABIArgInfo type to VarDecl type directly, unless
2287  // the parameter is promoted. In this case we convert to
2288  // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2289  QualType Ty = isPromoted ? info_it->type : Arg->getType();
2290  assert(hasScalarEvaluationKind(Ty) ==
2291  hasScalarEvaluationKind(Arg->getType()));
2292 
2293  unsigned FirstIRArg, NumIRArgs;
2294  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2295 
2296  switch (ArgI.getKind()) {
2297  case ABIArgInfo::InAlloca: {
2298  assert(NumIRArgs == 0);
2299  auto FieldIndex = ArgI.getInAllocaFieldIndex();
2300  CharUnits FieldOffset =
2301  CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2302  Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2303  Arg->getName());
2304  ArgVals.push_back(ParamValue::forIndirect(V));
2305  break;
2306  }
2307 
2308  case ABIArgInfo::Indirect: {
2309  assert(NumIRArgs == 1);
2310  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2311 
2312  if (!hasScalarEvaluationKind(Ty)) {
2313  // Aggregates and complex variables are accessed by reference. All we
2314  // need to do is realign the value, if requested.
2315  Address V = ParamAddr;
2316  if (ArgI.getIndirectRealign()) {
2317  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2318 
2319  // Copy from the incoming argument pointer to the temporary with the
2320  // appropriate alignment.
2321  //
2322  // FIXME: We should have a common utility for generating an aggregate
2323  // copy.
2324  CharUnits Size = getContext().getTypeSizeInChars(Ty);
2325  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2326  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2327  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2328  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2329  V = AlignedTemp;
2330  }
2331  ArgVals.push_back(ParamValue::forIndirect(V));
2332  } else {
2333  // Load scalar value from indirect argument.
2334  llvm::Value *V =
2335  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2336 
2337  if (isPromoted)
2338  V = emitArgumentDemotion(*this, Arg, V);
2339  ArgVals.push_back(ParamValue::forDirect(V));
2340  }
2341  break;
2342  }
2343 
2344  case ABIArgInfo::Extend:
2345  case ABIArgInfo::Direct: {
2346 
2347  // If we have the trivial case, handle it with no muss and fuss.
2348  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2349  ArgI.getCoerceToType() == ConvertType(Ty) &&
2350  ArgI.getDirectOffset() == 0) {
2351  assert(NumIRArgs == 1);
2352  llvm::Value *V = FnArgs[FirstIRArg];
2353  auto AI = cast<llvm::Argument>(V);
2354 
2355  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2356  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2357  PVD->getFunctionScopeIndex()) &&
2358  !CGM.getCodeGenOpts().NullPointerIsValid)
2359  AI->addAttr(llvm::Attribute::NonNull);
2360 
2361  QualType OTy = PVD->getOriginalType();
2362  if (const auto *ArrTy =
2363  getContext().getAsConstantArrayType(OTy)) {
2364  // A C99 array parameter declaration with the static keyword also
2365  // indicates dereferenceability, and if the size is constant we can
2366  // use the dereferenceable attribute (which requires the size in
2367  // bytes).
2368  if (ArrTy->getSizeModifier() == ArrayType::Static) {
2369  QualType ETy = ArrTy->getElementType();
2370  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2371  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2372  ArrSize) {
2373  llvm::AttrBuilder Attrs;
2374  Attrs.addDereferenceableAttr(
2375  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2376  AI->addAttrs(Attrs);
2377  } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2378  !CGM.getCodeGenOpts().NullPointerIsValid) {
2379  AI->addAttr(llvm::Attribute::NonNull);
2380  }
2381  }
2382  } else if (const auto *ArrTy =
2383  getContext().getAsVariableArrayType(OTy)) {
2384  // For C99 VLAs with the static keyword, we don't know the size so
2385  // we can't use the dereferenceable attribute, but in addrspace(0)
2386  // we know that it must be nonnull.
2387  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2388  !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2389  !CGM.getCodeGenOpts().NullPointerIsValid)
2390  AI->addAttr(llvm::Attribute::NonNull);
2391  }
2392 
2393  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2394  if (!AVAttr)
2395  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2396  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2397  if (AVAttr) {
2398  llvm::Value *AlignmentValue =
2399  EmitScalarExpr(AVAttr->getAlignment());
2400  llvm::ConstantInt *AlignmentCI =
2401  cast<llvm::ConstantInt>(AlignmentValue);
2402  unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2403  +llvm::Value::MaximumAlignment);
2404  AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2405  }
2406  }
2407 
2408  if (Arg->getType().isRestrictQualified())
2409  AI->addAttr(llvm::Attribute::NoAlias);
2410 
2411  // LLVM expects swifterror parameters to be used in very restricted
2412  // ways. Copy the value into a less-restricted temporary.
2413  if (FI.getExtParameterInfo(ArgNo).getABI()
2414  == ParameterABI::SwiftErrorResult) {
2415  QualType pointeeTy = Ty->getPointeeType();
2416  assert(pointeeTy->isPointerType());
2417  Address temp =
2418  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2419  Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2420  llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2421  Builder.CreateStore(incomingErrorValue, temp);
2422  V = temp.getPointer();
2423 
2424  // Push a cleanup to copy the value back at the end of the function.
2425  // The convention does not guarantee that the value will be written
2426  // back if the function exits with an unwind exception.
2427  EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2428  }
2429 
2430  // Ensure the argument is the correct type.
2431  if (V->getType() != ArgI.getCoerceToType())
2432  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2433 
2434  if (isPromoted)
2435  V = emitArgumentDemotion(*this, Arg, V);
2436 
2437  // Because of merging of function types from multiple decls it is
2438  // possible for the type of an argument to not match the corresponding
2439  // type in the function type. Since we are codegening the callee
2440  // in here, add a cast to the argument type.
2441  llvm::Type *LTy = ConvertType(Arg->getType());
2442  if (V->getType() != LTy)
2443  V = Builder.CreateBitCast(V, LTy);
2444 
2445  ArgVals.push_back(ParamValue::forDirect(V));
2446  break;
2447  }
2448 
2449  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2450  Arg->getName());
2451 
2452  // Pointer to store into.
2453  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2454 
2455  // Fast-isel and the optimizer generally like scalar values better than
2456  // FCAs, so we flatten them if this is safe to do for this argument.
2457  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2458  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2459  STy->getNumElements() > 1) {
2460  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2461  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2462  llvm::Type *DstTy = Ptr.getElementType();
2463  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2464 
2465  Address AddrToStoreInto = Address::invalid();
2466  if (SrcSize <= DstSize) {
2467  AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2468  } else {
2469  AddrToStoreInto =
2470  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2471  }
2472 
2473  assert(STy->getNumElements() == NumIRArgs);
2474  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2475  auto AI = FnArgs[FirstIRArg + i];
2476  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2477  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2478  Address EltPtr =
2479  Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2480  Builder.CreateStore(AI, EltPtr);
2481  }
2482 
2483  if (SrcSize > DstSize) {
2484  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2485  }
2486 
2487  } else {
2488  // Simple case, just do a coerced store of the argument into the alloca.
2489  assert(NumIRArgs == 1);
2490  auto AI = FnArgs[FirstIRArg];
2491  AI->setName(Arg->getName() + ".coerce");
2492  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2493  }
2494 
2495  // Match to what EmitParmDecl is expecting for this type.
2496  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2497  llvm::Value *V =
2498  EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2499  if (isPromoted)
2500  V = emitArgumentDemotion(*this, Arg, V);
2501  ArgVals.push_back(ParamValue::forDirect(V));
2502  } else {
2503  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2504  }
2505  break;
2506  }
2507 
2508  case ABIArgInfo::CoerceAndExpand: {
2509  // Reconstruct into a temporary.
2510  Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2511  ArgVals.push_back(ParamValue::forIndirect(alloca));
2512 
2513  auto coercionType = ArgI.getCoerceAndExpandType();
2514  alloca = Builder.CreateElementBitCast(alloca, coercionType);
2515  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2516 
2517  unsigned argIndex = FirstIRArg;
2518  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2519  llvm::Type *eltType = coercionType->getElementType(i);
2520  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2521  continue;
2522 
2523  auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2524  auto elt = FnArgs[argIndex++];
2525  Builder.CreateStore(elt, eltAddr);
2526  }
2527  assert(argIndex == FirstIRArg + NumIRArgs);
2528  break;
2529  }
2530 
2531  case ABIArgInfo::Expand: {
2532  // If this structure was expanded into multiple arguments then
2533  // we need to create a temporary and reconstruct it from the
2534  // arguments.
2535  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2536  LValue LV = MakeAddrLValue(Alloca, Ty);
2537  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2538 
2539  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2540  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2541  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2542  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2543  auto AI = FnArgs[FirstIRArg + i];
2544  AI->setName(Arg->getName() + "." + Twine(i));
2545  }
2546  break;
2547  }
2548 
2549  case ABIArgInfo::Ignore:
2550  assert(NumIRArgs == 0);
2551  // Initialize the local variable appropriately.
2552  if (!hasScalarEvaluationKind(Ty)) {
2553  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2554  } else {
2555  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2556  ArgVals.push_back(ParamValue::forDirect(U));
2557  }
2558  break;
2559  }
2560  }
2561 
2562  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2563  for (int I = Args.size() - 1; I >= 0; --I)
2564  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2565  } else {
2566  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2567  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2568  }
2569 }
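// Worked example for the flattening path above (a sketch, x86-64 assumed):
//
//   struct P { int x, y; };
//   void f(struct P p);          // coerced to a single i64
//
// The prolog creates an alloca for 'p' and does one coerced store of the
// i64 into it; had the ABI instead split 'p' into { i32, i32 }, each piece
// would arrive as its own IR argument ("p.coerce0", "p.coerce1") and be
// stored field by field.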
2570 
2571 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2572  while (insn->use_empty()) {
2573  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2574  if (!bitcast) return;
2575 
2576  // This is "safe" because we would have used a ConstantExpr otherwise.
2577  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2578  bitcast->eraseFromParent();
2579  }
2580 }
2581 
2582 /// Try to emit a fused autorelease of a return result.
2583 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2584  llvm::Value *result) {
2585  // We must be immediately following the cast.
2586  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2587  if (BB->empty()) return nullptr;
2588  if (&BB->back() != result) return nullptr;
2589 
2590  llvm::Type *resultType = result->getType();
2591 
2592  // result is in a BasicBlock and is therefore an Instruction.
2593  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2594 
2595  SmallVector<llvm::Instruction *, 4> InstsToKill;
2596 
2597  // Look for:
2598  // %generator = bitcast %type1* %generator2 to %type2*
2599  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2600  // We would have emitted this as a constant if the operand weren't
2601  // an Instruction.
2602  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2603 
2604  // Require the generator to be immediately followed by the cast.
2605  if (generator->getNextNode() != bitcast)
2606  return nullptr;
2607 
2608  InstsToKill.push_back(bitcast);
2609  }
2610 
2611  // Look for:
2612  // %generator = call i8* @objc_retain(i8* %originalResult)
2613  // or
2614  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2615  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2616  if (!call) return nullptr;
2617 
2618  bool doRetainAutorelease;
2619 
2620  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2621  doRetainAutorelease = true;
2622  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2623  .objc_retainAutoreleasedReturnValue) {
2624  doRetainAutorelease = false;
2625 
2626  // If we emitted an assembly marker for this call (and the
2627  // ARCEntrypoints field should have been set if so), go looking
2628  // for that call. If we can't find it, we can't do this
2629  // optimization. But it should always be the immediately previous
2630  // instruction, unless we needed bitcasts around the call.
2631  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2632  llvm::Instruction *prev = call->getPrevNode();
2633  assert(prev);
2634  if (isa<llvm::BitCastInst>(prev)) {
2635  prev = prev->getPrevNode();
2636  assert(prev);
2637  }
2638  assert(isa<llvm::CallInst>(prev));
2639  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2640  CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2641  InstsToKill.push_back(prev);
2642  }
2643  } else {
2644  return nullptr;
2645  }
2646 
2647  result = call->getArgOperand(0);
2648  InstsToKill.push_back(call);
2649 
2650  // Keep killing bitcasts, for sanity. Note that we no longer care
2651  // about precise ordering as long as there's exactly one use.
2652  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2653  if (!bitcast->hasOneUse()) break;
2654  InstsToKill.push_back(bitcast);
2655  result = bitcast->getOperand(0);
2656  }
2657 
2658  // Delete all the unnecessary instructions, from latest to earliest.
2659  for (auto *I : InstsToKill)
2660  I->eraseFromParent();
2661 
2662  // Do the fused retain/autorelease if we were asked to.
2663  if (doRetainAutorelease)
2664  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2665 
2666  // Cast back to the result type.
2667  return CGF.Builder.CreateBitCast(result, resultType);
2668 }
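// Sketch of the rewrite performed above (illustrative IR):
//
//   %v = call i8* @objc_retain(i8* %x)
//   ...pending autorelease of %v at the return...
//
// is fused into a single call to @objc_retainAutoreleaseReturnValue(%x);
// and when the retain was @objc_retainAutoreleasedReturnValue, the retain
// and the pending autorelease cancel and %x is returned directly.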
2669 
2670 /// If this is a +1 of the value of an immutable 'self', remove it.
2671 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2672  llvm::Value *result) {
2673  // This is only applicable to a method with an immutable 'self'.
2674  const ObjCMethodDecl *method =
2675  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2676  if (!method) return nullptr;
2677  const VarDecl *self = method->getSelfDecl();
2678  if (!self->getType().isConstQualified()) return nullptr;
2679 
2680  // Look for a retain call.
2681  llvm::CallInst *retainCall =
2682  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2683  if (!retainCall ||
2684  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2685  return nullptr;
2686 
2687  // Look for an ordinary load of 'self'.
2688  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2689  llvm::LoadInst *load =
2690  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2691  if (!load || load->isAtomic() || load->isVolatile() ||
2692  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2693  return nullptr;
2694 
2695  // Okay! Burn it all down. This relies for correctness on the
2696  // assumption that the retain is emitted as part of the return and
2697  // that thereafter everything is used "linearly".
2698  llvm::Type *resultType = result->getType();
2699  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2700  assert(retainCall->use_empty());
2701  retainCall->eraseFromParent();
2702  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2703 
2704  return CGF.Builder.CreateBitCast(load, resultType);
2705 }
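// Illustrative case for this peephole (assuming 'self' is not reassigned
// in the method, so its decl is const-qualified):
//
//   - (instancetype)configure {
//     ...
//     return self;     // emitted as a retain of the load of 'self'
//   }
//
// The retain feeding straight into the return is deleted here rather than
// being paired with an autorelease.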
2706 
2707 /// Emit an ARC autorelease of the result of a function.
2708 ///
2709 /// \return the value to actually return from the function
2710 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2711  llvm::Value *result) {
2712  // If we're returning 'self', kill the initial retain. This is a
2713  // heuristic attempt to "encourage correctness" in the really unfortunate
2714  // case where we have a return of self during a dealloc and we desperately
2715  // need to avoid the possible autorelease.
2716  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2717  return self;
2718 
2719  // At -O0, try to emit a fused retain/autorelease.
2720  if (CGF.shouldUseFusedARCCalls())
2721  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2722  return fused;
2723 
2724  return CGF.EmitARCAutoreleaseReturnValue(result);
2725 }
2726 
2727 /// Heuristically search for a dominating store to the return-value slot.
2728 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2729  // Check whether a User is a store whose pointer operand is the ReturnValue.
2730  // We are looking for stores to the ReturnValue, not for stores of the
2731  // ReturnValue to some other location.
2732  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2733  auto *SI = dyn_cast<llvm::StoreInst>(U);
2734  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2735  return nullptr;
2736  // These aren't actually possible for non-coerced returns, and we
2737  // only care about non-coerced returns on this code path.
2738  assert(!SI->isAtomic() && !SI->isVolatile());
2739  return SI;
2740  };
2741  // If there are multiple uses of the return-value slot, just check
2742  // for something immediately preceding the IP. Sometimes this can
2743  // happen with how we generate implicit-returns; it can also happen
2744  // with noreturn cleanups.
2745  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2746  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2747  if (IP->empty()) return nullptr;
2748  llvm::Instruction *I = &IP->back();
2749 
2750  // Skip lifetime markers
2751  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2752  IE = IP->rend();
2753  II != IE; ++II) {
2754  if (llvm::IntrinsicInst *Intrinsic =
2755  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2756  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2757  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2758  ++II;
2759  if (II == IE)
2760  break;
2761  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2762  continue;
2763  }
2764  }
2765  I = &*II;
2766  break;
2767  }
2768 
2769  return GetStoreIfValid(I);
2770  }
2771 
2772  llvm::StoreInst *store =
2773  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2774  if (!store) return nullptr;
2775 
2776  // Now do a quick-and-dirty dominance check: just walk up the
2777  // single-predecessors chain from the current insertion point.
2778  llvm::BasicBlock *StoreBB = store->getParent();
2779  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2780  while (IP != StoreBB) {
2781  if (!(IP = IP->getSinglePredecessor()))
2782  return nullptr;
2783  }
2784 
2785  // Okay, the store's basic block dominates the insertion point; we
2786  // can do our thing.
2787  return store;
2788 }
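// Example of what this search finds (illustrative): for
//
//   int f(void) { int r = compute(); return r; }
//
// the 'return' stores into the %retval alloca and the epilogue then loads
// it back. Proving the store dominates the return lets the caller of this
// helper forward the stored value and delete the load, the store, and
// usually the alloca itself.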
2789 
2790 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2791  bool EmitRetDbgLoc,
2792  SourceLocation EndLoc) {
2793  if (FI.isNoReturn()) {
2794  // Noreturn functions don't return.
2795  EmitUnreachable(EndLoc);
2796  return;
2797  }
2798 
2799  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2800  // Naked functions don't have epilogues.
2801  Builder.CreateUnreachable();
2802  return;
2803  }
2804 
2805  // Functions with no result always return void.
2806  if (!ReturnValue.isValid()) {
2807  Builder.CreateRetVoid();
2808  return;
2809  }
2810 
2811  llvm::DebugLoc RetDbgLoc;
2812  llvm::Value *RV = nullptr;
2813  QualType RetTy = FI.getReturnType();
2814  const ABIArgInfo &RetAI = FI.getReturnInfo();
2815 
2816  switch (RetAI.getKind()) {
2817  case ABIArgInfo::InAlloca:
2818  // Aggregates get evaluated directly into the destination. Sometimes we
2819  // need to return the sret value in a register, though.
2820  assert(hasAggregateEvaluationKind(RetTy));
2821  if (RetAI.getInAllocaSRet()) {
2822  llvm::Function::arg_iterator EI = CurFn->arg_end();
2823  --EI;
2824  llvm::Value *ArgStruct = &*EI;
2825  llvm::Value *SRet = Builder.CreateStructGEP(
2826  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2827  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2828  }
2829  break;
2830 
2831  case ABIArgInfo::Indirect: {
2832  auto AI = CurFn->arg_begin();
2833  if (RetAI.isSRetAfterThis())
2834  ++AI;
2835  switch (getEvaluationKind(RetTy)) {
2836  case TEK_Complex: {
2837  ComplexPairTy RT =
2838  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2839  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2840  /*isInit*/ true);
2841  break;
2842  }
2843  case TEK_Aggregate:
2844  // Do nothing; aggregates get evaluated directly into the destination.
2845  break;
2846  case TEK_Scalar:
2847  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2848  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2849  /*isInit*/ true);
2850  break;
2851  }
2852  break;
2853  }
2854 
2855  case ABIArgInfo::Extend:
2856  case ABIArgInfo::Direct:
2857  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2858  RetAI.getDirectOffset() == 0) {
2859  // The internal return value temp will always have pointer-to-return-type
2860  // type; just do a load.
2861 
2862  // If there is a dominating store to ReturnValue, we can elide
2863  // the load, zap the store, and usually zap the alloca.
2864  if (llvm::StoreInst *SI =
2865  findDominatingStoreToReturnValue(*this)) {
2866  // Reuse the debug location from the store unless there is
2867  // cleanup code to be emitted between the store and return
2868  // instruction.
2869  if (EmitRetDbgLoc && !AutoreleaseResult)
2870  RetDbgLoc = SI->getDebugLoc();
2871  // Get the stored value and nuke the now-dead store.
2872  RV = SI->getValueOperand();
2873  SI->eraseFromParent();
2874 
2875  // If that was the only use of the return value, nuke it as well now.
2876  auto returnValueInst = ReturnValue.getPointer();
2877  if (returnValueInst->use_empty()) {
2878  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2879  alloca->eraseFromParent();
2880  ReturnValue = Address::invalid();
2881  }
2882  }
2883 
2884  // Otherwise, we have to do a simple load.
2885  } else {
2886  RV = Builder.CreateLoad(ReturnValue);
2887  }
2888  } else {
2889  // If the value is offset in memory, apply the offset now.
2890  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2891 
2892  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2893  }
2894 
2895  // In ARC, end functions that return a retainable type with a call
2896  // to objc_autoreleaseReturnValue.
2897  if (AutoreleaseResult) {
2898 #ifndef NDEBUG
2899  // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2900  // been stripped of the typedefs, so we cannot use RetTy here. Get the
2901  // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2902  // CurCodeDecl or BlockInfo.
2903  QualType RT;
2904 
2905  if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2906  RT = FD->getReturnType();
2907  else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2908  RT = MD->getReturnType();
2909  else if (isa<BlockDecl>(CurCodeDecl))
2910  RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2911  else
2912  llvm_unreachable("Unexpected function/method type");
2913 
2914  assert(getLangOpts().ObjCAutoRefCount &&
2915  !FI.isReturnsRetained() &&
2916  RT->isObjCRetainableType());
2917 #endif
2918  RV = emitAutoreleaseOfResult(*this, RV);
2919  }
2920 
2921  break;
2922 
2923  case ABIArgInfo::Ignore:
2924  break;
2925 
2926  case ABIArgInfo::CoerceAndExpand: {
2927  auto coercionType = RetAI.getCoerceAndExpandType();
2928  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2929 
2930  // Load all of the coerced elements out into results.
2931  llvm::SmallVector<llvm::Value*, 4> results;
2932  Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2933  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2934  auto coercedEltType = coercionType->getElementType(i);
2935  if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2936  continue;
2937 
2938  auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2939  auto elt = Builder.CreateLoad(eltAddr);
2940  results.push_back(elt);
2941  }
2942 
2943  // If we have one result, it's the single direct result type.
2944  if (results.size() == 1) {
2945  RV = results[0];
2946 
2947  // Otherwise, we need to make a first-class aggregate.
2948  } else {
2949  // Construct a return type that lacks padding elements.
2950  llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2951 
2952  RV = llvm::UndefValue::get(returnType);
2953  for (unsigned i = 0, e = results.size(); i != e; ++i) {
2954  RV = Builder.CreateInsertValue(RV, results[i], i);
2955  }
2956  }
2957  break;
2958  }
2959 
2960  case ABIArgInfo::Expand:
2961  llvm_unreachable("Invalid ABI kind for return argument");
2962  }
2963 
2964  llvm::Instruction *Ret;
2965  if (RV) {
2966  EmitReturnValueCheck(RV);
2967  Ret = Builder.CreateRet(RV);
2968  } else {
2969  Ret = Builder.CreateRetVoid();
2970  }
2971 
2972  if (RetDbgLoc)
2973  Ret->setDebugLoc(std::move(RetDbgLoc));
2974 }
2975 
2976 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2977  // A current decl may not be available when emitting vtable thunks.
2978  if (!CurCodeDecl)
2979  return;
2980 
2981  ReturnsNonNullAttr *RetNNAttr = nullptr;
2982  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2983  RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2984 
2985  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2986  return;
2987 
2988  // Prefer the returns_nonnull attribute if it's present.
2989  SourceLocation AttrLoc;
2990  SanitizerMask CheckKind;
2991  SanitizerHandler Handler;
2992  if (RetNNAttr) {
2993  assert(!requiresReturnValueNullabilityCheck() &&
2994  "Cannot check nullability and the nonnull attribute");
2995  AttrLoc = RetNNAttr->getLocation();
2996  CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2997  Handler = SanitizerHandler::NonnullReturn;
2998  } else {
2999  if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3000  if (auto *TSI = DD->getTypeSourceInfo())
3001  if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3002  AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3003  CheckKind = SanitizerKind::NullabilityReturn;
3004  Handler = SanitizerHandler::NullabilityReturn;
3005  }
3006 
3007  SanitizerScope SanScope(this);
3008 
3009  // Make sure the "return" source location is valid. If we're checking a
3010  // nullability annotation, make sure the preconditions for the check are met.
3011  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3012  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3013  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3014  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3015  if (requiresReturnValueNullabilityCheck())
3016  CanNullCheck =
3017  Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3018  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3019  EmitBlock(Check);
3020 
3021  // Now do the null check.
3022  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3023  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3024  llvm::Value *DynamicData[] = {SLocPtr};
3025  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3026 
3027  EmitBlock(NoCheck);
3028 
3029 #ifndef NDEBUG
3030  // The return location should not be used after the check has been emitted.
3031  ReturnLocation = Address::invalid();
3032 #endif
3033 }
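// Illustrative trigger (assuming -fsanitize=returns-nonnull-attribute):
//
//   __attribute__((returns_nonnull)) char *dup(const char *s) {
//     return NULL;   // branches to the "nullcheck" handler emitted above
//   }
//
// The nullability variant does the same for a _Nonnull return type under
// -fsanitize=nullability-return.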
3034 
3035 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3036  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3037  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3038 }
3039 
3040 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3041  QualType Ty) {
3042  // FIXME: Generate IR in one pass, rather than going back and fixing up these
3043  // placeholders.
3044  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3045  llvm::Type *IRPtrTy = IRTy->getPointerTo();
3046  llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3047 
3048  // FIXME: When we generate this IR in one pass, we shouldn't need
3049  // this win32-specific alignment hack.
3050  CharUnits Align = CharUnits::fromQuantity(4);
3051  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3052 
3053  return AggValueSlot::forAddr(Address(Placeholder, Align),
3054  Ty.getQualifiers(),
3055  AggValueSlot::IsNotDestructed,
3056  AggValueSlot::DoesNotNeedGCBarriers,
3057  AggValueSlot::IsNotAliased,
3058  AggValueSlot::DoesNotOverlap);
3059 }
3060 
3061 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3062  const VarDecl *param,
3063  SourceLocation loc) {
3064  // StartFunction converted the ABI-lowered parameter(s) into a
3065  // local alloca. We need to turn that into an r-value suitable
3066  // for EmitCall.
3067  Address local = GetAddrOfLocalVar(param);
3068 
3069  QualType type = param->getType();
3070 
3071  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3072  "cannot emit delegate call arguments for inalloca arguments!");
3073 
3074  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3075  // but the argument needs to be the original pointer.
3076  if (type->isReferenceType()) {
3077  args.add(RValue::get(Builder.CreateLoad(local)), type);
3078 
3079  // In ARC, move out of consumed arguments so that the release cleanup
3080  // entered by StartFunction doesn't cause an over-release. This isn't
3081  // optimal -O0 code generation, but it should get cleaned up when
3082  // optimization is enabled. This also assumes that delegate calls are
3083  // performed exactly once for a set of arguments, but that should be safe.
3084  } else if (getLangOpts().ObjCAutoRefCount &&
3085  param->hasAttr<NSConsumedAttr>() &&
3086  type->isObjCRetainableType()) {
3087  llvm::Value *ptr = Builder.CreateLoad(local);
3088  auto null =
3089  llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3090  Builder.CreateStore(null, local);
3091  args.add(RValue::get(ptr), type);
3092 
3093  // For the most part, we just need to load the alloca, except that
3094  // aggregate r-values are actually pointers to temporaries.
3095  } else {
3096  args.add(convertTempToRValue(local, type, loc), type);
3097  }
3098 
3099  // Deactivate the cleanup for the callee-destructed param that was pushed.
3100  if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3101  type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3102  type.isDestructedType()) {
3103  EHScopeStack::stable_iterator cleanup =
3104  CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3105  assert(cleanup.isValid() &&
3106  "cleanup for callee-destructed param not recorded");
3107  // This unreachable is a temporary marker which will be removed later.
3108  llvm::Instruction *isActive = Builder.CreateUnreachable();
3109  args.addArgCleanupDeactivation(cleanup, isActive);
3110  }
3111 }
3112 
3113 static bool isProvablyNull(llvm::Value *addr) {
3114  return isa<llvm::ConstantPointerNull>(addr);
3115 }
3116 
3117 /// Emit the actual writing-back of a writeback.
3118 static void emitWriteback(CodeGenFunction &CGF,
3119  const CallArgList::Writeback &writeback) {
3120  const LValue &srcLV = writeback.Source;
3121  Address srcAddr = srcLV.getAddress();
3122  assert(!isProvablyNull(srcAddr.getPointer()) &&
3123  "shouldn't have writeback for provably null argument");
3124 
3125  llvm::BasicBlock *contBB = nullptr;
3126 
3127  // If the argument wasn't provably non-null, we need to null check
3128  // before doing the store.
3129  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3130  CGF.CGM.getDataLayout());
3131  if (!provablyNonNull) {
3132  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3133  contBB = CGF.createBasicBlock("icr.done");
3134 
3135  llvm::Value *isNull =
3136  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3137  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3138  CGF.EmitBlock(writebackBB);
3139  }
3140 
3141  // Load the value to writeback.
3142  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3143 
3144  // Cast it back, in case we're writing an id to a Foo* or something.
3145  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3146  "icr.writeback-cast");
3147 
3148  // Perform the writeback.
3149 
3150  // If we have a "to use" value, it's something we need to emit a use
3151  // of. This has to be carefully threaded in: if it's done after the
3152  // release it's potentially undefined behavior (and the optimizer
3153  // will ignore it), and if it happens before the retain then the
3154  // optimizer could move the release there.
3155  if (writeback.ToUse) {
3156  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3157 
3158  // Retain the new value. No need to block-copy here: the block's
3159  // being passed up the stack.
3160  value = CGF.EmitARCRetainNonBlock(value);
3161 
3162  // Emit the intrinsic use here.
3163  CGF.EmitARCIntrinsicUse(writeback.ToUse);
3164 
3165  // Load the old value (primitively).
3166  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3167 
3168  // Put the new value in place (primitively).
3169  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3170 
3171  // Release the old value.
3172  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3173 
3174  // Otherwise, we can just do a normal lvalue store.
3175  } else {
3176  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3177  }
3178 
3179  // Jump to the continuation block.
3180  if (!provablyNonNull)
3181  CGF.EmitBlock(contBB);
3182 }
3183 
3184 static void emitWritebacks(CodeGenFunction &CGF,
3185  const CallArgList &args) {
3186  for (const auto &I : args.writebacks())
3187  emitWriteback(CGF, I);
3188 }
3189 
3190 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3191  const CallArgList &CallArgs) {
3192  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3193  CallArgs.getCleanupsToDeactivate();
3194  // Iterate in reverse to increase the likelihood of popping the cleanup.
3195  for (const auto &I : llvm::reverse(Cleanups)) {
3196  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3197  I.IsActiveIP->eraseFromParent();
3198  }
3199 }
3200 
3201 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3202  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3203  if (uop->getOpcode() == UO_AddrOf)
3204  return uop->getSubExpr();
3205  return nullptr;
3206 }
3207 
3208 /// Emit an argument that's being passed call-by-writeback. That is,
3209 /// we are passing the address of an __autoreleased temporary; it
3210 /// might be copy-initialized with the current value of the given
3211 /// address, but it will definitely be copied out of after the call.
3212 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3213  const ObjCIndirectCopyRestoreExpr *CRE) {
3214  LValue srcLV;
3215 
3216  // Make an optimistic effort to emit the address as an l-value.
3217  // This can fail if the argument expression is more complicated.
3218  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3219  srcLV = CGF.EmitLValue(lvExpr);
3220 
3221  // Otherwise, just emit it as a scalar.
3222  } else {
3223  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3224 
3225  QualType srcAddrType =
3226  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3227  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3228  }
3229  Address srcAddr = srcLV.getAddress();
3230 
3231  // The dest and src types don't necessarily match in LLVM terms
3232  // because of the crazy ObjC compatibility rules.
3233 
3234  llvm::PointerType *destType =
3235  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3236 
3237  // If the address is a constant null, just pass the appropriate null.
3238  if (isProvablyNull(srcAddr.getPointer())) {
3239  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3240  CRE->getType());
3241  return;
3242  }
3243 
3244  // Create the temporary.
3245  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3246  CGF.getPointerAlign(),
3247  "icr.temp");
3248  // Loading an l-value can introduce a cleanup if the l-value is __weak,
3249  // and that cleanup will be conditional if we can't prove that the l-value
3250  // isn't null, so we need to register a dominating point so that the cleanups
3251  // system will make valid IR.
3252  CodeGenFunction::ConditionalEvaluation condEval(CGF);
3253 
3254  // Zero-initialize it if we're not doing a copy-initialization.
3255  bool shouldCopy = CRE->shouldCopy();
3256  if (!shouldCopy) {
3257  llvm::Value *null =
3258  llvm::ConstantPointerNull::get(
3259  cast<llvm::PointerType>(destType->getElementType()));
3260  CGF.Builder.CreateStore(null, temp);
3261  }
3262 
3263  llvm::BasicBlock *contBB = nullptr;
3264  llvm::BasicBlock *originBB = nullptr;
3265 
3266  // If the address is *not* known to be non-null, we need to switch.
3267  llvm::Value *finalArgument;
3268 
3269  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3270  CGF.CGM.getDataLayout());
3271  if (provablyNonNull) {
3272  finalArgument = temp.getPointer();
3273  } else {
3274  llvm::Value *isNull =
3275  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3276 
3277  finalArgument = CGF.Builder.CreateSelect(isNull,
3278  llvm::ConstantPointerNull::get(destType),
3279  temp.getPointer(), "icr.argument");
3280 
3281  // If we need to copy, then the load has to be conditional, which
3282  // means we need control flow.
3283  if (shouldCopy) {
3284  originBB = CGF.Builder.GetInsertBlock();
3285  contBB = CGF.createBasicBlock("icr.cont");
3286  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3287  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3288  CGF.EmitBlock(copyBB);
3289  condEval.begin(CGF);
3290  }
3291  }
3292 
3293  llvm::Value *valueToUse = nullptr;
3294 
3295  // Perform a copy if necessary.
3296  if (shouldCopy) {
3297  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3298  assert(srcRV.isScalar());
3299 
3300  llvm::Value *src = srcRV.getScalarVal();
3301  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3302  "icr.cast");
3303 
3304  // Use an ordinary store, not a store-to-lvalue.
3305  CGF.Builder.CreateStore(src, temp);
3306 
3307  // If optimization is enabled, and the value was held in a
3308  // __strong variable, we need to tell the optimizer that this
3309  // value has to stay alive until we're doing the store back.
3310  // This is because the temporary is effectively unretained,
3311  // and so otherwise we can violate the high-level semantics.
3312  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3313  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3314  valueToUse = src;
3315  }
3316  }
3317 
3318  // Finish the control flow if we needed it.
3319  if (shouldCopy && !provablyNonNull) {
3320  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3321  CGF.EmitBlock(contBB);
3322 
3323  // Make a phi for the value to intrinsically use.
3324  if (valueToUse) {
3325  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3326  "icr.to-use");
3327  phiToUse->addIncoming(valueToUse, copyBB);
3328  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3329  originBB);
3330  valueToUse = phiToUse;
3331  }
3332 
3333  condEval.end(CGF);
3334  }
3335 
3336  args.addWriteback(srcLV, temp, valueToUse);
3337  args.add(RValue::get(finalArgument), CRE->getType());
3338 }
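// For illustration: under ARC, a call like
//
//   __strong NSError *err;
//   [obj performWithError:&err];   // parameter type: NSError * __autoreleasing *
//
// is modeled with an ObjCIndirectCopyRestoreExpr and lowered by
// emitWritebackArg above to, roughly (a sketch; identifiers illustrative):
//
//   NSError * __autoreleasing icr.temp = err;  // copy in, only if shouldCopy()
//   [obj performWithError:&icr.temp];
//   err = icr.temp;                            // copied out by emitWriteback()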
3339 
3340 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3341  assert(!StackBase);
3342 
3343  // Save the stack.
3344  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3345  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3346 }
3347 
3348 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3349  if (StackBase) {
3350  // Restore the stack after the call.
3351  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3352  CGF.Builder.CreateCall(F, StackBase);
3353  }
3354 }
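// For illustration, the stack save/restore bracket above yields IR of this
// shape around an inalloca call (a sketch; names are illustrative):
//
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>        ; argument memory
//   ; ...arguments are stored into %argmem...
//   call void @callee(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)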
3355 
3356 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3357  SourceLocation ArgLoc,
3358  AbstractCallee AC,
3359  unsigned ParmNum) {
3360  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3361  SanOpts.has(SanitizerKind::NullabilityArg)))
3362  return;
3363 
3364  // The param decl may be missing in a variadic function.
3365  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3366  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3367 
3368  // Prefer the nonnull attribute if it's present.
3369  const NonNullAttr *NNAttr = nullptr;
3370  if (SanOpts.has(SanitizerKind::NonnullAttribute))
3371  NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3372 
3373  bool CanCheckNullability = false;
3374  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3375  auto Nullability = PVD->getType()->getNullability(getContext());
3376  CanCheckNullability = Nullability &&
3377  *Nullability == NullabilityKind::NonNull &&
3378  PVD->getTypeSourceInfo();
3379  }
3380 
3381  if (!NNAttr && !CanCheckNullability)
3382  return;
3383 
3384  SourceLocation AttrLoc;
3385  SanitizerMask CheckKind;
3386  SanitizerHandler Handler;
3387  if (NNAttr) {
3388  AttrLoc = NNAttr->getLocation();
3389  CheckKind = SanitizerKind::NonnullAttribute;
3390  Handler = SanitizerHandler::NonnullArg;
3391  } else {
3392  AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3393  CheckKind = SanitizerKind::NullabilityArg;
3394  Handler = SanitizerHandler::NullabilityArg;
3395  }
3396 
3397  SanitizerScope SanScope(this);
3398  assert(RV.isScalar());
3399  llvm::Value *V = RV.getScalarVal();
3400  llvm::Value *Cond =
3401  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3402  llvm::Constant *StaticData[] = {
3403  EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3404  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3405  };
3406  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3407 }
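// For illustration, with -fsanitize=nonnull-attribute a call site such as
//
//   void f(int *p) __attribute__((nonnull(1)));
//   f(q);
//
// gets a guard of the shape "if (q == null) report nonnull_arg(...)" emitted
// by EmitNonNullArgCheck above; -fsanitize=nullability-arg does the same for
// parameters declared _Nonnull.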
3408 
3409 void CodeGenFunction::EmitCallArgs(
3410  CallArgList &Args, ArrayRef<QualType> ArgTypes,
3411  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3412  AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3413  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3414 
3415  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3416  // because arguments are destroyed left to right in the callee. As a special
3417  // case, there are certain language constructs that require left-to-right
3418  // evaluation, and in those cases we consider the evaluation order requirement
3419  // to trump the "destruction order is reverse construction order" guarantee.
3420  bool LeftToRight =
3421  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3422  ? Order == EvaluationOrder::ForceLeftToRight
3423  : Order != EvaluationOrder::ForceRightToLeft;
3424 
3425  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3426  RValue EmittedArg) {
3427  if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3428  return;
3429  auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3430  if (PS == nullptr)
3431  return;
3432 
3433  const auto &Context = getContext();
3434  auto SizeTy = Context.getSizeType();
3435  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3436  assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3437  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3438  EmittedArg.getScalarVal());
3439  Args.add(RValue::get(V), SizeTy);
3440  // If we're emitting args in reverse, be sure to do so with
3441  // pass_object_size, as well.
3442  if (!LeftToRight)
3443  std::swap(Args.back(), *(&Args.back() - 1));
3444  };
3445 
3446  // Insert a stack save if we're going to need any inalloca args.
3447  bool HasInAllocaArgs = false;
3448  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3449  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3450  I != E && !HasInAllocaArgs; ++I)
3451  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3452  if (HasInAllocaArgs) {
3453  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3454  Args.allocateArgumentMemory(*this);
3455  }
3456  }
3457 
3458  // Evaluate each argument in the appropriate order.
3459  size_t CallArgsStart = Args.size();
3460  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3461  unsigned Idx = LeftToRight ? I : E - I - 1;
3462  CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3463  unsigned InitialArgSize = Args.size();
3464  // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3465  // the argument and parameter match or the objc method is parameterized.
3466  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3467  getContext().hasSameUnqualifiedType((*Arg)->getType(),
3468  ArgTypes[Idx]) ||
3469  (isa<ObjCMethodDecl>(AC.getDecl()) &&
3470  isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3471  "Argument and parameter types don't match");
3472  EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3473  // In particular, we depend on it being the last arg in Args, and the
3474  // objectsize bits depend on there only being one arg if !LeftToRight.
3475  assert(InitialArgSize + 1 == Args.size() &&
3476  "The code below depends on only adding one arg per EmitCallArg");
3477  (void)InitialArgSize;
3478  // Since pointer arguments are never emitted as LValues, it is safe to emit
3479  // the non-null argument check for r-values only.
3480  if (!Args.back().hasLValue()) {
3481  RValue RVArg = Args.back().getKnownRValue();
3482  EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3483  ParamsToSkip + Idx);
3484  // @llvm.objectsize should never have side-effects and shouldn't need
3485  // destruction/cleanups, so we can safely "emit" it after its arg,
3486  // regardless of right-to-leftness
3487  MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3488  }
3489  }
3490 
3491  if (!LeftToRight) {
3492  // Un-reverse the arguments we just evaluated so they match up with the LLVM
3493  // IR function.
3494  std::reverse(Args.begin() + CallArgsStart, Args.end());
3495  }
3496 }
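// For illustration: given g(make1(), make2()), on the Microsoft C++ ABI the
// loop above evaluates make2() before make1() (right to left), and the
// std::reverse() afterwards restores the left-to-right order in which the
// LLVM call expects its operands; Itanium-style ABIs evaluate left to right
// unless the language forces an order.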
3497 
3498 namespace {
3499 
3500 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3501  DestroyUnpassedArg(Address Addr, QualType Ty)
3502  : Addr(Addr), Ty(Ty) {}
3503 
3504  Address Addr;
3505  QualType Ty;
3506 
3507  void Emit(CodeGenFunction &CGF, Flags flags) override {
3508  QualType::DestructionKind DtorKind = Ty.isDestructedType();
3509  if (DtorKind == QualType::DK_cxx_destructor) {
3510  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3511  assert(!Dtor->isTrivial());
3512  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3513  /*Delegating=*/false, Addr);
3514  } else {
3515  CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3516  }
3517  }
3518 };
3519 
3520 struct DisableDebugLocationUpdates {
3521  CodeGenFunction &CGF;
3522  bool disabledDebugInfo;
3523  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3524  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3525  CGF.disableDebugInfo();
3526  }
3527  ~DisableDebugLocationUpdates() {
3528  if (disabledDebugInfo)
3529  CGF.enableDebugInfo();
3530  }
3531 };
3532 
3533 } // end anonymous namespace
3534 
3535 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3536  if (!HasLV)
3537  return RV;
3538  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3539  CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3540  LV.isVolatile());
3541  IsUsed = true;
3542  return RValue::getAggregate(Copy.getAddress());
3543 }
3544 
3545 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3546  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3547  if (!HasLV && RV.isScalar())
3548  CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3549  else if (!HasLV && RV.isComplex())
3550  CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3551  else {
3552  auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3553  LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3554  // We assume that call args are never copied into subobjects.
3555  CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3556  HasLV ? LV.isVolatileQualified()
3557  : RV.isVolatileQualified());
3558  }
3559  IsUsed = true;
3560 }
3561 
3562 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3563  QualType type) {
3564  DisableDebugLocationUpdates Dis(*this, E);
3565  if (const ObjCIndirectCopyRestoreExpr *CRE
3566  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3567  assert(getLangOpts().ObjCAutoRefCount);
3568  return emitWritebackArg(*this, args, CRE);
3569  }
3570 
3571  assert(type->isReferenceType() == E->isGLValue() &&
3572  "reference binding to unmaterialized r-value!");
3573 
3574  if (E->isGLValue()) {
3575  assert(E->getObjectKind() == OK_Ordinary);
3576  return args.add(EmitReferenceBindingToExpr(E), type);
3577  }
3578 
3579  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3580 
3581  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3582  // However, we still have to push an EH-only cleanup in case we unwind before
3583  // we make it to the call.
3584  if (HasAggregateEvalKind &&
3585  type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3586  // If we're using inalloca, use the argument memory. Otherwise, use a
3587  // temporary.
3588  AggValueSlot Slot;
3589  if (args.isUsingInAlloca())
3590  Slot = createPlaceholderSlot(*this, type);
3591  else
3592  Slot = CreateAggTemp(type, "agg.tmp");
3593 
3594  bool DestroyedInCallee = true, NeedsEHCleanup = true;
3595  if (const auto *RD = type->getAsCXXRecordDecl())
3596  DestroyedInCallee = RD->hasNonTrivialDestructor();
3597  else
3598  NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3599 
3600  if (DestroyedInCallee)
3601  Slot.setExternallyDestructed();
3602 
3603  EmitAggExpr(E, Slot);
3604  RValue RV = Slot.asRValue();
3605  args.add(RV, type);
3606 
3607  if (DestroyedInCallee && NeedsEHCleanup) {
3608  // Create a no-op GEP between the placeholder and the cleanup so we can
3609  // RAUW it successfully. It also serves as a marker of the first
3610  // instruction where the cleanup is active.
3611  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3612  type);
3613  // This unreachable is a temporary marker which will be removed later.
3614  llvm::Instruction *IsActive = Builder.CreateUnreachable();
3615  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3616  }
3617  return;
3618  }
3619 
3620  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3621  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3622  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3623  assert(L.isSimple());
3624  args.addUncopiedAggregate(L, type);
3625  return;
3626  }
3627 
3628  args.add(EmitAnyExprToTemp(E), type);
3629 }
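// For illustration, the callee-destruction path above corresponds to:
//
//   struct S { ~S(); };   // non-trivial destructor
//   void g(S s);
//   g(S());               // MS C++ ABI: g() destroys s; the caller keeps
//                         // only an EH-only cleanup for the unwind path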
3630 
3631 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3632  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3633  // implicitly widens null pointer constants that are arguments to varargs
3634  // functions to pointer-sized ints.
3635  if (!getTarget().getTriple().isOSWindows())
3636  return Arg->getType();
3637 
3638  if (Arg->getType()->isIntegerType() &&
3639  getContext().getTypeSize(Arg->getType()) <
3640  getContext().getTargetInfo().getPointerWidth(0) &&
3641  Arg->isNullPointerConstant(getContext(),
3642  Expr::NPC_ValueDependentIsNotNull)) {
3643  return getContext().getIntPtrType();
3644  }
3645 
3646  return Arg->getType();
3647 }
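// For illustration, getVarArgType matters for Win64 code such as
//
//   printf("%p", NULL);   // NULL is plain 0 in Windows headers
//
// where the 32-bit constant 0 is promoted to the pointer-sized intptr_t so
// the full varargs slot is initialized.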
3648 
3649 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3650 // optimizer it can aggressively ignore unwind edges.
3651 void
3652 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3653  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3654  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3655  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3656  CGM.getNoObjCARCExceptionsMetadata());
3657 }
3658 
3659 /// Emits a call to the given no-arguments nounwind runtime function.
3660 llvm::CallInst *
3661 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3662  const llvm::Twine &name) {
3663  return EmitNounwindRuntimeCall(callee, None, name);
3664 }
3665 
3666 /// Emits a call to the given nounwind runtime function.
3667 llvm::CallInst *
3668 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3669  ArrayRef<llvm::Value *> args,
3670  const llvm::Twine &name) {
3671  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3672  call->setDoesNotThrow();
3673  return call;
3674 }
3675 
3676 /// Emits a simple call (never an invoke) to the given no-arguments
3677 /// runtime function.
3678 llvm::CallInst *
3679 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3680  const llvm::Twine &name) {
3681  return EmitRuntimeCall(callee, None, name);
3682 }
3683 
3684 // Calls which may throw must have operand bundles indicating which funclet
3685 // they are nested within.
3686 SmallVector<llvm::OperandBundleDef, 1>
3687 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3688  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3689  // There is no need for a funclet operand bundle if we aren't inside a
3690  // funclet.
3691  if (!CurrentFuncletPad)
3692  return BundleList;
3693 
3694  // Skip intrinsics which cannot throw.
3695  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3696  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3697  return BundleList;
3698 
3699  BundleList.emplace_back("funclet", CurrentFuncletPad);
3700  return BundleList;
3701 }
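// For illustration, a call emitted inside a cleanup funclet carries the
// bundle added above (a sketch; value names are illustrative):
//
//   %pad = cleanuppad within none []
//   call void @may_throw() [ "funclet"(token %pad) ]
//
// Without the bundle, WinEH preparation could not tell which funclet the
// call belongs to.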
3702 
3703 /// Emits a simple call (never an invoke) to the given runtime function.
3704 llvm::CallInst *
3705 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3706  ArrayRef<llvm::Value *> args,
3707  const llvm::Twine &name) {
3708  llvm::CallInst *call =
3709  Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
3710  call->setCallingConv(getRuntimeCC());
3711  return call;
3712 }
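// For illustration, a typical use of the helpers above (the runtime function
// named here is hypothetical):
//
//   llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__example_runtime");
//   llvm::CallInst *C = EmitNounwindRuntimeCall(Fn, {Arg}, "rt.call");
//
// EmitRuntimeCall applies only the runtime calling convention; the nounwind
// variant additionally marks the call site as non-throwing.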
3713 
3714 /// Emits a call or invoke to the given noreturn runtime function.
3715 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3716  ArrayRef<llvm::Value*> args) {
3717  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3718  getBundlesForFunclet(callee);
3719 
3720  if (getInvokeDest()) {
3721  llvm::InvokeInst *invoke =
3722  Builder.CreateInvoke(callee,
3723  getUnreachableBlock(),
3724  getInvokeDest(),
3725  args,
3726  BundleList);
3727  invoke->setDoesNotReturn();
3728  invoke->setCallingConv(getRuntimeCC());
3729  } else {
3730  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3731  call->setDoesNotReturn();
3732  call->setCallingConv(getRuntimeCC());
3733  Builder.CreateUnreachable();
3734  }
3735 }
3736 
3737 /// Emits a call or invoke instruction to the given nullary runtime function.
3738 llvm::CallSite
3739 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3740  const Twine &name) {
3741  return EmitRuntimeCallOrInvoke(callee, None, name);
3742 }
3743 
3744 /// Emits a call or invoke instruction to the given runtime function.
3745 llvm::CallSite
3746 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3747  ArrayRef<llvm::Value *> args,
3748  const Twine &name) {
3749  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3750  callSite.setCallingConv(getRuntimeCC());
3751  return callSite;
3752 }
3753 
3754 /// Emits a call or invoke instruction to the given function, depending
3755 /// on the current state of the EH stack.
3756 llvm::CallSite
3757 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3758  ArrayRef<llvm::Value *> Args,
3759  const Twine &Name) {
3760  llvm::BasicBlock *InvokeDest = getInvokeDest();
3761  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3762  getBundlesForFunclet(Callee);
3763 
3764  llvm::Instruction *Inst;
3765  if (!InvokeDest)
3766  Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3767  else {
3768  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3769  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3770  Name);
3771  EmitBlock(ContBB);
3772  }
3773 
3774  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3775  // optimizer it can aggressively ignore unwind edges.
3776  if (CGM.getLangOpts().ObjCAutoRefCount)
3777  AddObjCARCExceptionMetadata(Inst);
3778 
3779  return llvm::CallSite(Inst);
3780 }
3781 
3782 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3783  llvm::Value *New) {
3784  DeferredReplacements.push_back(std::make_pair(Old, New));
3785 }
3786 
3787 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3788  const CGCallee &Callee,
3789  ReturnValueSlot ReturnValue,
3790  const CallArgList &CallArgs,
3791  llvm::Instruction **callOrInvoke,
3792  SourceLocation Loc) {
3793  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3794 
3795  assert(Callee.isOrdinary() || Callee.isVirtual());
3796 
3797  // Handle struct-return functions by passing a pointer to the
3798  // location that we would like to return into.
3799  QualType RetTy = CallInfo.getReturnType();
3800  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3801 
3802  llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3803 
3804  // 1. Set up the arguments.
3805 
3806  // If we're using inalloca, insert the allocation after the stack save.
3807  // FIXME: Do this earlier rather than hacking it in here!
3808  Address ArgMemory = Address::invalid();
3809  const llvm::StructLayout *ArgMemoryLayout = nullptr;
3810  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3811  const llvm::DataLayout &DL = CGM.getDataLayout();
3812  ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3813  llvm::Instruction *IP = CallArgs.getStackBase();
3814  llvm::AllocaInst *AI;
3815  if (IP) {
3816  IP = IP->getNextNode();
3817  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3818  "argmem", IP);
3819  } else {
3820  AI = CreateTempAlloca(ArgStruct, "argmem");
3821  }
3822  auto Align = CallInfo.getArgStructAlignment();
3823  AI->setAlignment(Align.getQuantity());
3824  AI->setUsedWithInAlloca(true);
3825  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3826  ArgMemory = Address(AI, Align);
3827  }
3828 
3829  // Helper function to drill into the inalloca allocation.
3830  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3831  auto FieldOffset =
3832  CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3833  return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3834  };
3835 
3836  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3837  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3838 
3839  // If the call returns a temporary with struct return, create a temporary
3840  // alloca to hold the result, unless one is given to us.
3841  Address SRetPtr = Address::invalid();
3842  Address SRetAlloca = Address::invalid();
3843  llvm::Value *UnusedReturnSizePtr = nullptr;
3844  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3845  if (!ReturnValue.isNull()) {
3846  SRetPtr = ReturnValue.getValue();
3847  } else {
3848  SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3849  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3850  uint64_t size =
3851  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3852  UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3853  }
3854  }
3855  if (IRFunctionArgs.hasSRetArg()) {
3856  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3857  } else if (RetAI.isInAlloca()) {
3858  Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3859  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3860  }
3861  }
3862 
3863  Address swiftErrorTemp = Address::invalid();
3864  Address swiftErrorArg = Address::invalid();
3865 
3866  // Translate all of the arguments as necessary to match the IR lowering.
3867  assert(CallInfo.arg_size() == CallArgs.size() &&
3868  "Mismatch between function signature & arguments.");
3869  unsigned ArgNo = 0;
3870  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3871  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3872  I != E; ++I, ++info_it, ++ArgNo) {
3873  const ABIArgInfo &ArgInfo = info_it->info;
3874 
3875  // Insert a padding argument to ensure proper alignment.
3876  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3877  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3878  llvm::UndefValue::get(ArgInfo.getPaddingType());
3879 
3880  unsigned FirstIRArg, NumIRArgs;
3881  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3882 
3883  switch (ArgInfo.getKind()) {
3884  case ABIArgInfo::InAlloca: {
3885  assert(NumIRArgs == 0);
3886  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3887  if (I->isAggregate()) {
3888  // Replace the placeholder with the appropriate argument slot GEP.
3889  Address Addr = I->hasLValue()
3890  ? I->getKnownLValue().getAddress()
3891  : I->getKnownRValue().getAggregateAddress();
3892  llvm::Instruction *Placeholder =
3893  cast<llvm::Instruction>(Addr.getPointer());
3894  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3895  Builder.SetInsertPoint(Placeholder);
3896  Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3897  Builder.restoreIP(IP);
3898  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3899  } else {
3900  // Store the RValue into the argument struct.
3901  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3902  unsigned AS = Addr.getType()->getPointerAddressSpace();
3903  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3904  // There are some cases where a trivial bitcast is not avoidable. The
3905  // definition of a type later in a translation unit may change its type
3906  // from {}* to (%struct.foo*)*.
3907  if (Addr.getType() != MemType)
3908  Addr = Builder.CreateBitCast(Addr, MemType);
3909  I->copyInto(*this, Addr);
3910  }
3911  break;
3912  }
3913 
3914  case ABIArgInfo::Indirect: {
3915  assert(NumIRArgs == 1);
3916  if (!I->isAggregate()) {
3917  // Make a temporary alloca to pass the argument.
3918  Address Addr = CreateMemTempWithoutCast(
3919  I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3920  IRCallArgs[FirstIRArg] = Addr.getPointer();
3921 
3922  I->copyInto(*this, Addr);
3923  } else {
3924  // We want to avoid creating an unnecessary temporary+copy here;
3925  // however, we need one in three cases:
3926  // 1. If the argument is not byval, and we are required to copy the
3927  // source. (This case doesn't occur on any common architecture.)
3928  // 2. If the argument is byval, RV is not sufficiently aligned, and
3929  // we cannot force it to be sufficiently aligned.
3930  // 3. If the argument is byval, but RV is not located in default
3931  // or alloca address space.
3932  Address Addr = I->hasLValue()
3933  ? I->getKnownLValue().getAddress()
3934  : I->getKnownRValue().getAggregateAddress();
3935  llvm::Value *V = Addr.getPointer();
3936  CharUnits Align = ArgInfo.getIndirectAlign();
3937  const llvm::DataLayout *TD = &CGM.getDataLayout();
3938 
3939  assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3940  IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3941  TD->getAllocaAddrSpace()) &&
3942  "indirect argument must be in alloca address space");
3943 
3944  bool NeedCopy = false;
3945 
3946  if (Addr.getAlignment() < Align &&
3947  llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3948  Align.getQuantity()) {
3949  NeedCopy = true;
3950  } else if (I->hasLValue()) {
3951  auto LV = I->getKnownLValue();
3952  auto AS = LV.getAddressSpace();
3953  if ((!ArgInfo.getIndirectByVal() &&
3954  (LV.getAlignment() >=
3955  getContext().getTypeAlignInChars(I->Ty))) ||
3956  (ArgInfo.getIndirectByVal() &&
3957  ((AS != LangAS::Default && AS != LangAS::opencl_private &&
3958  AS != CGM.getASTAllocaAddressSpace())))) {
3959  NeedCopy = true;
3960  }
3961  }
3962  if (NeedCopy) {
3963  // Create an aligned temporary, and copy to it.
3964  Address AI = CreateMemTempWithoutCast(
3965  I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
3966  IRCallArgs[FirstIRArg] = AI.getPointer();
3967  I->copyInto(*this, AI);
3968  } else {
3969  // Skip the extra memcpy call.
3970  auto *T = V->getType()->getPointerElementType()->getPointerTo(
3971  CGM.getDataLayout().getAllocaAddrSpace());
3972  IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
3973  *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
3974  true);
3975  }
3976  }
3977  break;
3978  }
3979 
3980  case ABIArgInfo::Ignore:
3981  assert(NumIRArgs == 0);
3982  break;
3983 
3984  case ABIArgInfo::Extend:
3985  case ABIArgInfo::Direct: {
3986  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3987  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3988  ArgInfo.getDirectOffset() == 0) {
3989  assert(NumIRArgs == 1);
3990  llvm::Value *V;
3991  if (!I->isAggregate())
3992  V = I->getKnownRValue().getScalarVal();
3993  else
3994  V = Builder.CreateLoad(
3995  I->hasLValue() ? I->getKnownLValue().getAddress()
3996  : I->getKnownRValue().getAggregateAddress());
3997 
3998  // Implement swifterror by copying into a new swifterror argument.
3999  // We'll write back in the normal path out of the call.
4000  if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4001  == ParameterABI::SwiftErrorResult) {
4002  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4003 
4004  QualType pointeeTy = I->Ty->getPointeeType();
4005  swiftErrorArg =
4006  Address(V, getContext().getTypeAlignInChars(pointeeTy));
4007 
4008  swiftErrorTemp =
4009  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4010  V = swiftErrorTemp.getPointer();
4011  cast<llvm::AllocaInst>(V)->setSwiftError(true);
4012 
4013  llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4014  Builder.CreateStore(errorValue, swiftErrorTemp);
4015  }
4016 
4017  // We might have to widen integers, but we should never truncate.
4018  if (ArgInfo.getCoerceToType() != V->getType() &&
4019  V->getType()->isIntegerTy())
4020  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4021 
4022  // If the argument doesn't match, perform a bitcast to coerce it. This
4023  // can happen due to trivial type mismatches.
4024  if (FirstIRArg < IRFuncTy->getNumParams() &&
4025  V->getType() != IRFuncTy->getParamType(FirstIRArg))
4026  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4027 
4028  IRCallArgs[FirstIRArg] = V;
4029  break;
4030  }
4031 
4032  // FIXME: Avoid the conversion through memory if possible.
4033  Address Src = Address::invalid();
4034  if (!I->isAggregate()) {
4035  Src = CreateMemTemp(I->Ty, "coerce");
4036  I->copyInto(*this, Src);
4037  } else {
4038  Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4039  : I->getKnownRValue().getAggregateAddress();
4040  }
4041 
4042  // If the value is offset in memory, apply the offset now.
4043  Src = emitAddressAtOffset(*this, Src, ArgInfo);
4044 
4045  // Fast-isel and the optimizer generally like scalar values better than
4046  // FCAs, so we flatten them if this is safe to do for this argument.
4047  llvm::StructType *STy =
4048  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4049  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4050  llvm::Type *SrcTy = Src.getType()->getElementType();
4051  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4052  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4053 
4054  // If the source type is smaller than the destination type of the
4055  // coerce-to logic, copy the source value into a temp alloca the size
4056  // of the destination type to allow loading all of it. The bits past
4057  // the source value are left undef.
4058  if (SrcSize < DstSize) {
4059  Address TempAlloca
4060  = CreateTempAlloca(STy, Src.getAlignment(),
4061  Src.getName() + ".coerce");
4062  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4063  Src = TempAlloca;
4064  } else {
4065  Src = Builder.CreateBitCast(Src,
4066  STy->getPointerTo(Src.getAddressSpace()));
4067  }
4068 
4069  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
4070  assert(NumIRArgs == STy->getNumElements());
4071  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4072  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
4073  Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
4074  llvm::Value *LI = Builder.CreateLoad(EltPtr);
4075  IRCallArgs[FirstIRArg + i] = LI;
4076  }
4077  } else {
4078  // In the simple case, just pass the coerced loaded value.
4079  assert(NumIRArgs == 1);
4080  IRCallArgs[FirstIRArg] =
4081  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4082  }
4083 
4084  break;
4085  }
4086 
4087  case ABIArgInfo::CoerceAndExpand: {
4088  auto coercionType = ArgInfo.getCoerceAndExpandType();
4089  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4090 
4091  llvm::Value *tempSize = nullptr;
4092  Address addr = Address::invalid();
4093  Address AllocaAddr = Address::invalid();
4094  if (I->isAggregate()) {
4095  addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4096  : I->getKnownRValue().getAggregateAddress();
4097 
4098  } else {
4099  RValue RV = I->getKnownRValue();
4100  assert(RV.isScalar()); // complex should always just be direct
4101 
4102  llvm::Type *scalarType = RV.getScalarVal()->getType();
4103  auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4104  auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4105 
4106  // Materialize to a temporary.
4107  addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4108  CharUnits::fromQuantity(std::max(
4109  layout->getAlignment(), scalarAlign)),
4110  "tmp",
4111  /*ArraySize=*/nullptr, &AllocaAddr);
4112  tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4113 
4114  Builder.CreateStore(RV.getScalarVal(), addr);
4115  }
4116 
4117  addr = Builder.CreateElementBitCast(addr, coercionType);
4118 
4119  unsigned IRArgPos = FirstIRArg;
4120  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4121  llvm::Type *eltType = coercionType->getElementType(i);
4122  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4123  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4124  llvm::Value *elt = Builder.CreateLoad(eltAddr);
4125  IRCallArgs[IRArgPos++] = elt;
4126  }
4127  assert(IRArgPos == FirstIRArg + NumIRArgs);
4128 
4129  if (tempSize) {
4130  EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4131  }
4132 
4133  break;
4134  }
4135 
4136  case ABIArgInfo::Expand:
4137  unsigned IRArgPos = FirstIRArg;
4138  ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4139  assert(IRArgPos == FirstIRArg + NumIRArgs);
4140  break;
4141  }
4142  }
4143 
4144  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4145  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4146 
4147  // If we're using inalloca, set up that argument.
4148  if (ArgMemory.isValid()) {
4149  llvm::Value *Arg = ArgMemory.getPointer();
4150  if (CallInfo.isVariadic()) {
4151  // When passing non-POD arguments by value to variadic functions, we will
4152  // end up with a variadic prototype and an inalloca call site. In such
4153  // cases, we can't do any parameter mismatch checks. Give up and bitcast
4154  // the callee.
4155  unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4156  auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4157  CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4158  } else {
4159  llvm::Type *LastParamTy =
4160  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4161  if (Arg->getType() != LastParamTy) {
4162 #ifndef NDEBUG
4163  // Assert that these structs have equivalent element types.
4164  llvm::StructType *FullTy = CallInfo.getArgStruct();
4165  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4166  cast<llvm::PointerType>(LastParamTy)->getElementType());
4167  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4168  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4169  DE = DeclaredTy->element_end(),
4170  FI = FullTy->element_begin();
4171  DI != DE; ++DI, ++FI)
4172  assert(*DI == *FI);
4173 #endif
4174  Arg = Builder.CreateBitCast(Arg, LastParamTy);
4175  }
4176  }
4177  assert(IRFunctionArgs.hasInallocaArg());
4178  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4179  }
4180 
4181  // 2. Prepare the function pointer.
4182 
4183  // If the callee is a bitcast of a non-variadic function to have a
4184  // variadic function pointer type, check to see if we can remove the
4185  // bitcast. This comes up with unprototyped functions.
4186  //
4187  // This makes the IR nicer, but more importantly it ensures that we
4188  // can inline the function at -O0 if it is marked always_inline.
4189  auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4190  llvm::FunctionType *CalleeFT =
4191  cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4192  if (!CalleeFT->isVarArg())
4193  return Ptr;
4194 
4195  llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4196  if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4197  return Ptr;
4198 
4199  llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4200  if (!OrigFn)
4201  return Ptr;
4202 
4203  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4204 
4205  // If the original type is variadic, or if any of the component types
4206  // disagree, we cannot remove the cast.
4207  if (OrigFT->isVarArg() ||
4208  OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4209  OrigFT->getReturnType() != CalleeFT->getReturnType())
4210  return Ptr;
4211 
4212  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4213  if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4214  return Ptr;
4215 
4216  return OrigFn;
4217  };
4218  CalleePtr = simplifyVariadicCallee(CalleePtr);
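  // For illustration, the simplification above fires for unprototyped calls:
  //
  //   void f();   // K&R-style declaration, no prototype
  //   f(1, 2);    // callee is bitcast to a variadic function pointer type
  //
  // Stripping that bitcast recovers the original function so it can still be
  // inlined at -O0 when marked always_inline.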
4219 
4220  // 3. Perform the actual call.
4221 
4222  // Deactivate any cleanups that we're supposed to do immediately before
4223  // the call.
4224  if (!CallArgs.getCleanupsToDeactivate().empty())
4225  deactivateArgCleanupsBeforeCall(*this, CallArgs);
4226 
4227  // Assert that the arguments we computed match up. The IR verifier
4228  // will catch this, but this is a common enough source of problems
4229  // during IRGen changes that it's way better for debugging to catch
4230  // it ourselves here.
4231 #ifndef NDEBUG
4232  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4233  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4234  // An inalloca argument can have a different type.
4235  if (IRFunctionArgs.hasInallocaArg() &&
4236  i == IRFunctionArgs.getInallocaArgNo())
4237  continue;
4238  if (i < IRFuncTy->getNumParams())
4239  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4240  }
4241 #endif
4242 
4243  // Compute the calling convention and attributes.
4244  unsigned CallingConv;
4245  llvm::AttributeList Attrs;
4246  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4247  Callee.getAbstractInfo(), Attrs, CallingConv,
4248  /*AttrOnCallSite=*/true);
4249 
4250  // Apply some call-site-specific attributes.
4251  // TODO: work this into building the attribute set.
4252 
4253  // Apply always_inline to all calls within flatten functions.
4254  // FIXME: should this really take priority over __try, below?
4255  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4256  !(Callee.getAbstractInfo().getCalleeDecl() &&
4257  Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
4258  Attrs =
4259  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4260  llvm::Attribute::AlwaysInline);
4261  }
4262 
4263  // Disable inlining inside SEH __try blocks.
4264  if (isSEHTryScope()) {
4265  Attrs =
4266  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4267  llvm::Attribute::NoInline);
4268  }
4269 
4270  // Decide whether to use a call or an invoke.
4271  bool CannotThrow;
4272  if (currentFunctionUsesSEHTry()) {
4273  // SEH cares about asynchronous exceptions, so everything can "throw."
4274  CannotThrow = false;
4275  } else if (isCleanupPadScope() &&
4276  EHPersonality::get(*this).isMSVCXXPersonality()) {
4277  // The MSVC++ personality will implicitly terminate the program if an
4278  // exception is thrown during a cleanup outside of a try/catch.
4279  // We don't need to model anything in IR to get this behavior.
4280  CannotThrow = true;
4281  } else {
4282  // Otherwise, nounwind call sites will never throw.
4283  CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4284  llvm::Attribute::NoUnwind);
4285  }
4286 
4287  // If we made a temporary, be sure to clean up after ourselves. Note that we
4288  // can't depend on being inside of an ExprWithCleanups, so we need to manually
4289  // pop this cleanup later on. Being eager about this is OK, since this
4290  // temporary is 'invisible' outside of the callee.
4291  if (UnusedReturnSizePtr)
4292  pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4293  UnusedReturnSizePtr);
4294 
4295  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4296 
4297  SmallVector<llvm::OperandBundleDef, 1> BundleList =
4298  getBundlesForFunclet(CalleePtr);
4299 
4300  // Emit the actual call/invoke instruction.
4301  llvm::CallSite CS;
4302  if (!InvokeDest) {
4303  CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4304  } else {
4305  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4306  CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4307  BundleList);
4308  EmitBlock(Cont);
4309  }
4310  llvm::Instruction *CI = CS.getInstruction();
4311  if (callOrInvoke)
4312  *callOrInvoke = CI;
4313 
4314  // Apply the attributes and calling convention.
4315  CS.setAttributes(Attrs);
4316  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4317 
4318  // Apply various metadata.
4319 
4320  if (!CI->getType()->isVoidTy())
4321  CI->setName("call");
4322 
4323  // Insert instrumentation or attach profile metadata at indirect call sites.
4324  // For more details, see the comment before the definition of
4325  // IPVK_IndirectCallTarget in InstrProfData.inc.
4326  if (!CS.getCalledFunction())
4327  PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4328  CI, CalleePtr);
4329 
4330  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4331  // optimizer it can aggressively ignore unwind edges.
4332  if (CGM.getLangOpts().ObjCAutoRefCount)
4333  AddObjCARCExceptionMetadata(CI);
4334 
4335  // Suppress tail calls if requested.
4336  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4337  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4338  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4339  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4340  }
4341 
4342  // 4. Finish the call.
4343 
4344  // If the call doesn't return, finish the basic block and clear the
4345  // insertion point; this allows the rest of IRGen to discard
4346  // unreachable code.
4347  if (CS.doesNotReturn()) {
4348  if (UnusedReturnSizePtr)
4349  PopCleanupBlock();
4350 
4351  // Strip away the noreturn attribute to better diagnose unreachable UB.
4352  if (SanOpts.has(SanitizerKind::Unreachable)) {
4353  if (auto *F = CS.getCalledFunction())
4354  F->removeFnAttr(llvm::Attribute::NoReturn);
4355  CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4356  llvm::Attribute::NoReturn);
4357  }
4358 
4359  EmitUnreachable(Loc);
4360  Builder.ClearInsertionPoint();
4361 
4362  // FIXME: For now, emit a dummy basic block because expr emitters
4363  // generally are not ready to handle emitting expressions at unreachable
4364  // points.
4365  EnsureInsertPoint();
4366 
4367  // Return a reasonable RValue.
4368  return GetUndefRValue(RetTy);
4369  }
4370 
4371  // Perform the swifterror writeback.
4372  if (swiftErrorTemp.isValid()) {
4373  llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4374  Builder.CreateStore(errorResult, swiftErrorArg);
4375  }
4376 
4377  // Emit any call-associated writebacks immediately. Arguably this
4378  // should happen after any return-value munging.
4379  if (CallArgs.hasWritebacks())
4380  emitWritebacks(*this, CallArgs);
4381 
4382  // The stack cleanup for inalloca arguments has to run out of the normal
4383  // lexical order, so deactivate it and run it manually here.
4384  CallArgs.freeArgumentMemory(*this);
4385 
4386  // Extract the return value.
4387  RValue Ret = [&] {
4388  switch (RetAI.getKind()) {
4389  case ABIArgInfo::CoerceAndExpand: {
4390  auto coercionType = RetAI.getCoerceAndExpandType();
4391  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4392 
4393  Address addr = SRetPtr;
4394  addr = Builder.CreateElementBitCast(addr, coercionType);
4395 
4396  assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4397  bool requiresExtract = isa<llvm::StructType>(CI->getType());
4398 
4399  unsigned unpaddedIndex = 0;
4400  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4401  llvm::Type *eltType = coercionType->getElementType(i);
4402  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4403  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4404  llvm::Value *elt = CI;
4405  if (requiresExtract)
4406  elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4407  else
4408  assert(unpaddedIndex == 0);
4409  Builder.CreateStore(elt, eltAddr);
4410  }
4411  // FALLTHROUGH
4412  LLVM_FALLTHROUGH;
4413  }
4414 
4415  case ABIArgInfo::InAlloca:
4416  case ABIArgInfo::Indirect: {
4417  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4418  if (UnusedReturnSizePtr)
4419  PopCleanupBlock();
4420  return ret;
4421  }
4422 
4423  case ABIArgInfo::Ignore:
4424  // If we are ignoring an argument that had a result, make sure to
4425  // construct the appropriate return value for our caller.
4426  return GetUndefRValue(RetTy);
4427 
4428  case ABIArgInfo::Extend:
4429  case ABIArgInfo::Direct: {
4430  llvm::Type *RetIRTy = ConvertType(RetTy);
4431  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4432  switch (getEvaluationKind(RetTy)) {
4433  case TEK_Complex: {
4434  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4435  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4436  return RValue::getComplex(std::make_pair(Real, Imag));
4437  }
4438  case TEK_Aggregate: {
4439  Address DestPtr = ReturnValue.getValue();
4440  bool DestIsVolatile = ReturnValue.isVolatile();
4441 
4442  if (!DestPtr.isValid()) {
4443  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4444  DestIsVolatile = false;
4445  }
4446  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4447  return RValue::getAggregate(DestPtr);
4448  }
4449  case TEK_Scalar: {
4450  // If the argument doesn't match, perform a bitcast to coerce it. This
4451  // can happen due to trivial type mismatches.
4452  llvm::Value *V = CI;
4453  if (V->getType() != RetIRTy)
4454  V = Builder.CreateBitCast(V, RetIRTy);
4455  return RValue::get(V);
4456  }
4457  }
4458  llvm_unreachable("bad evaluation kind");
4459  }
4460 
4461  Address DestPtr = ReturnValue.getValue();
4462  bool DestIsVolatile = ReturnValue.isVolatile();
4463 
4464  if (!DestPtr.isValid()) {
4465  DestPtr = CreateMemTemp(RetTy, "coerce");
4466  DestIsVolatile = false;
4467  }
4468 
4469  // If the value is offset in memory, apply the offset now.
4470  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4471  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4472 
4473  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4474  }
4475 
4476  case ABIArgInfo::Expand:
4477  llvm_unreachable("Invalid ABI kind for return argument");
4478  }
4479 
4480  llvm_unreachable("Unhandled ABIArgInfo::Kind");
4481  } ();
4482 
4483  // Emit the assume_aligned check on the return value.
4484  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4485  if (Ret.isScalar() && TargetDecl) {
4486  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4487  llvm::Value *OffsetValue = nullptr;
4488  if (const auto *Offset = AA->getOffset())
4489  OffsetValue = EmitScalarExpr(Offset);
4490 
4491  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4492  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4493  EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4494  OffsetValue);
4495  } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4496  llvm::Value *ParamVal =
4497  CallArgs[AA->getParamIndex().getLLVMIndex()].getRValue(
4498  *this).getScalarVal();
4499  EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4500  }
4501  }
4502 
4503  return Ret;
4504 }
4505 
4506 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4507  if (isVirtual()) {
4508  const CallExpr *CE = getVirtualCallExpr();
4509  return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4510  CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(),
4511  CE ? CE->getBeginLoc() : SourceLocation());
4512  }
4513 
4514  return *this;
4515 }
4516 
4517 /* VarArg handling */
4518 
4519 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4520  VAListAddr = VE->isMicrosoftABI()
4521  ? EmitMSVAListRef(VE->getSubExpr())
4522  : EmitVAListRef(VE->getSubExpr());
4523  QualType Ty = VE->getType();
4524  if (VE->isMicrosoftABI())
4525  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4526  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4527 }
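// For illustration, EmitVAArg above is what a source-level va_arg lowers to:
//
//   int first(int n, ...) {
//     va_list ap;
//     va_start(ap, n);
//     int v = va_arg(ap, int);   // -> CodeGenFunction::EmitVAArg
//     va_end(ap);
//     return v;
//   }
//
// with the target's ABIInfo supplying the actual addressing arithmetic.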
Definition: CGCall.cpp:3787
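A hedged call-emission sketch (FnInfo, Callee, Args, and Loc are assumed in-scope values of the parameter types listed above):
// Sketch: emit the call and optionally capture the generated instruction.
llvm::Instruction *CallOrInvoke = nullptr;
RValue RV = CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args, &CallOrInvoke, Loc);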
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
Represents a variable declaration or definition.
Definition: Decl.h:812
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:104
llvm::Instruction * getStackBase() const
Definition: CGCall.h:334
unsigned getNumParams() const
Definition: Type.h:3869
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
llvm::Value * getFunctionPointer() const
Definition: CGCall.h:178
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from Src, coercing the loaded value to type Ty.
Definition: CGCall.cpp:1216
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:6683
void setCoerceToType(llvm::Type *T)
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:3540
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:139
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition: CGCall.cpp:3356
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:234
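A small sketch, assuming Builder is the in-scope CGBuilderTy and I8Addr already points to i8:
// Sketch: advance an i8 pointer by a constant byte offset, preserving alignment info.
Address Adjusted = Builder.CreateConstInBoundsByteGEP(I8Addr, CharUnits::fromQuantity(8), "adj");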
llvm::Value * getPointer() const
Definition: Address.h:38
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:300
Address getValue() const
Definition: CGCall.h:381
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
Represents a parameter to a function.
Definition: Decl.h:1551
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:57
void add(RValue rvalue, QualType type)
Definition: CGCall.h:285
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
Definition: CGCall.cpp:46
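A hedged sketch of how this mapping is typically consumed (Info and Fn are assumed names for a FunctionType::ExtInfo and an llvm::Function*):
// Sketch: translate the frontend convention and stamp it on the IR function.
unsigned CC = CodeGen::ClangCallConvToLLVMCallConv(Info.getCC());
Fn->setCallingConv(static_cast<llvm::CallingConv::ID>(CC));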
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Definition: TargetInfo.cpp:420
Represents a struct/union/class.
Definition: Decl.h:3585
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3348
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:348
An object to manage conditionally-evaluated expressions.
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2452
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1018
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:3184
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2790
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.h:3978
Address getAddress() const
Definition: CGValue.h:327
unsigned getRegParm() const
Definition: Type.h:3514
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:154
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:4039
llvm::Constant * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
field_range fields() const
Definition: Decl.h:3776
bool isVolatileQualified() const
Definition: CGValue.h:258
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2198
Represents a member of a struct/union/class.
Definition: Decl.h:2571
CharUnits getAlignment() const
Definition: CGValue.h:316
RequiredArgs getRequiredArgs() const
bool isUsingInAlloca() const
Returns whether we're using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:339
unsigned getFunctionScopeIndex() const
Returns the index of this parameter in its prototype or method scope.
Definition: Decl.h:1604
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:104
llvm::CallInst * EmitRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isOrdinary() const
Definition: CGCall.h:169
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:266
CharUnits getArgStructAlignment() const
bool isReferenceType() const
Definition: Type.h:6282
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:81
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2188
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
Definition: CGCall.cpp:3212
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:514
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
bool isVirtual() const
Definition: CGCall.h:187
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Decl.h:738
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:321
bool getProducesResult() const
Definition: Type.h:3509
llvm::FunctionType * getFunctionType() const
Definition: CGCall.h:203
bool isGLValue() const
Definition: Expr.h:251
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:285
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment...
static bool hasScalarEvaluationKind(QualType T)
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2583
void copyInto(CodeGenFunction &CGF, Address A) const
Definition: CGCall.cpp:3545
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:157
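A sketch under the same assumptions (Builder and Addr assumed; CGF.Int8Ty is the cached i8 type):
// Sketch: reinterpret the pointee type without disturbing the recorded alignment.
Address AsBytes = Builder.CreateElementBitCast(Addr, CGF.Int8Ty, "as.bytes");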
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
uint32_t Offset
Definition: CacheTokens.cpp:43
llvm::StructType * getCoerceAndExpandType() const
bool hasConstructorVariants() const
Does this ABI have different entrypoints for complete-object and base-subobject constructors?
Definition: TargetCXXABI.h:215
Wrapper for source info for functions.
Definition: TypeLoc.h:1327
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:67
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:109
unsigned getInAllocaFieldIndex() const
const_arg_iterator arg_begin() const
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:66
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:390
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:1821
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:274
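A hedged sketch (Dest, Src, and NumBytes are assumed; IntPtrTy is the cached pointer-sized integer type):
// Sketch: emit an alignment-aware memcpy between two Addresses.
llvm::Value *Size = llvm::ConstantInt::get(CGF.IntPtrTy, NumBytes);
Builder.CreateMemCpy(Dest, Src, Size, /*IsVolatile=*/false);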
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:134
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
Definition: CGCall.cpp:468
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
Definition: CGCall.cpp:702
Values of this type can never be null.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:85
bool isSimple() const
Definition: CGValue.h:252
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:273
bool isInstance() const
Definition: DeclCXX.h:2069
An ordinary object is located at an address in memory.
Definition: Specifiers.h:126
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1697
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:106
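A sketch of pairing the raw alloca with an explicit alignment (T is an assumed QualType):
// Sketch: entry-block alloca, then wrap it as an aligned Address.
llvm::AllocaInst *AI = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(T), "tmp");
Address TmpAddr(AI, CGF.getContext().getTypeAlignInChars(T));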
FunctionType::ExtInfo getExtInfo() const
QualType getReturnType() const
Definition: DeclObjC.h:323
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:737
bool getNoReturn() const
Definition: Type.h:3508
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:84
Address getAggregateAddress() const
getAggregateAddress() - Return the address of the aggregate value.
Definition: CGValue.h:71
bool getNoCallerSavedRegs() const
Definition: Type.h:3510
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
Definition: CGCall.cpp:3562
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:510
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD)
Derives the 'this' type for codegen purposes, i.e.
Definition: CGCall.cpp:73
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Definition: TargetInfo.h:305
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:3567
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
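A round-trip sketch using the matching store helper documented above (SrcAddr, DstAddr, Ty, Loc assumed):
// Sketch: load with the proper memory-to-scalar conversion, then store it back.
llvm::Value *V = CGF.EmitLoadOfScalar(SrcAddr, /*Volatile=*/false, Ty, Loc);
CGF.EmitStoreOfScalar(V, DstAddr, /*Volatile=*/false, Ty);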
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
Definition: CGCall.cpp:499
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
Definition: Type.h:3639
bool hasAttr() const
Definition: DeclBase.h:531
CanQualType getReturnType() const
Const iterator for iterating over Stmt * arrays that contain only Expr *.
Definition: Stmt.h:359
bool isValid() const
Definition: Address.h:36
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1605
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3676
llvm::CallInst * EmitNounwindRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:3929
const TargetCodeGenInfo & getTargetCodeGenInfo()
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:39
writeback_const_range writebacks() const
Definition: CGCall.h:317
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:306
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
Definition: CGCall.cpp:3061
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:3906
Address Temporary
The temporary alloca.
Definition: CGCall.h:271
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'this'...
Definition: CGCXXABI.h:107
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:274
ExtParameterInfo withIsNoEscape(bool NoEscape) const
Definition: Type.h:3417
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:3040
This represents one expression.
Definition: Expr.h:105
bool isVariadic() const
Whether this function is variadic.
Definition: Decl.cpp:2686
static Address invalid()
Definition: Address.h:35
llvm::Type * getUnpaddedCoerceAndExpandType() const
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:3035
bool useObjCFPRetForRealType(RealType T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
Definition: TargetInfo.h:705
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:88
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:66
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
Definition: CGCall.cpp:695
bool getHasRegParm() const
Definition: Type.h:3512
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:6746
bool isObjCRetainableType() const
Definition: Type.cpp:3889
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2700
llvm::Constant * objc_retain
id objc_retain(id);
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:44
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
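A tiny sketch of the opaque-quantity discipline this type enforces:
// Sketch: CharUnits converts to and from raw integers only explicitly.
CharUnits Align = CharUnits::fromQuantity(16);
int64_t Raw = Align.getQuantity(); // 16; Align.isZero() is false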
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:356
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2571
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition: CGCall.cpp:3687
A class for recording the number of arguments that a function signature requires. ...
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type...
Definition: CGCall.cpp:1505
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:640
QualType getType() const
Definition: Expr.h:127
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1346
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes &#39;this&#39; as the first parameter followed by varargs.
Definition: CGCall.cpp:529
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2728
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:197
llvm::PointerType * AllocaInt8PtrTy
void Profile(llvm::FoldingSetNodeID &ID)
UnaryOperator - This represents the unary-expression&#39;s (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1789
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:2020
ASTContext & getContext() const
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:406
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1162
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:236
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:35
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1268
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:411
CanQualType getCanonicalTypeUnqualified() const
LValue getKnownLValue() const
Definition: CGCall.h:240
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Definition: Type.h:4356
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store of Src to the address Dst, coercing the value as needed.
Definition: CGCall.cpp:1293
Enumerates target-specific builtins in their own namespaces within namespace clang.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:142
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:169
Kind
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
Definition: CGCall.cpp:1510
CanProxy< U > castAs() const
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:3201
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a null pointer constant...
Definition: Expr.cpp:3366
Encodes a location in the source.
QualType getReturnType() const
Definition: Type.h:3607
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2072
A saved depth on the scope stack.
Definition: EHScopeStack.h:107
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:290
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition: CGCall.cpp:3746
ParameterABI getABI() const
Return the ABI treatment of this parameter.
Definition: Type.h:3390
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1246
CallingConv getCC() const
Definition: Type.h:3521
const Decl * getDecl() const
Definition: GlobalDecl.h:64
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C &#39;SEL&#39; type.
Definition: ASTContext.h:1865
An aggregate value slot.
Definition: CGValue.h:437
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
Definition: CGCall.cpp:455
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2041
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2419
const_arg_iterator arg_end() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
ObjCEntrypoints & getObjCEntrypoints() const
CoerceAndExpand - Only valid for aggregate argument types.
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:3340
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:717
CanQualType VoidTy
Definition: ASTContext.h:1025
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
bool isAnyPointerType() const
Definition: Type.h:6274
An aligned address.
Definition: Address.h:25
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after...
Definition: Type.h:1160
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Definition: TargetInfo.h:711
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:178
All available information about a concrete callee.
Definition: CGCall.h:67
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:364
Complete object dtor.
Definition: ABI.h:36
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses &#39;fp2ret&#39; when used as a return type.
Definition: CGCall.cpp:1527
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1688
bool hasFlexibleArrayMember() const
Definition: Decl.h:3639
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3903
CXXCtorType
C++ constructor types.
Definition: ABI.h:25
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
Definition: CGCall.cpp:620
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:554
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1126
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:356
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:59
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
const TargetInfo & getTarget() const
Definition: CodeGenTypes.h:176
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2376
ExtInfo getExtInfo() const
Definition: Type.h:3618
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:93
CodeGenFunction::ComplexPairTy ComplexPairTy
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
Definition: CGBuilder.h:172
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:92
LValue Source
The original argument.
Definition: CGCall.h:268
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type...
Definition: CGCall.cpp:431
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args)
Emits a call or invoke to the given noreturn runtime function.
Definition: CGCall.cpp:3715
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:91
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:1000
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP, const FunctionDecl *FD)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:167
Interesting information about a specific parameter that can't simply be reflected in the parameter's type...
Definition: Type.h:3377
void EmitARCIntrinsicUse(ArrayRef< llvm::Value *> values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition: CGObjC.cpp:1806
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:70
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2166
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
RValue getRValue(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3535
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:796
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:108
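A sketch of the alignment-carrying load/store pair (Builder, Src, Dst assumed):
// Sketch: the Address overloads derive the IR alignment from the Address itself.
llvm::Value *V = Builder.CreateLoad(Src, "reload");
Builder.CreateStore(V, Dst, /*IsVolatile=*/false);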
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
Definition: TargetInfo.cpp:401
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
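A hedged sketch (TmpAddr, T, and V are assumed names):
// Sketch: wrap an Address as an LValue, then store through it with full semantics.
LValue LV = CGF.MakeAddrLValue(TmpAddr, T);
CGF.EmitStoreThroughLValue(RValue::get(V), LV, /*isInit=*/true);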
uint64_t SanitizerMask
Definition: Sanitizers.h:26
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:4346
Complex values, per C99 6.2.5p11.
Definition: Type.h:2473
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:77
static bool classof(const OMPClause *T)
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:2013
QualType getCanonicalTypeInternal() const
Definition: Type.h:2354
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:6531
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
Definition: CGCall.cpp:2671
CharUnits getIndirectAlign() const
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:44
T * getAttr() const
Definition: DeclBase.h:527
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:52
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:645
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:120
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:62
Expand - Only valid for aggregate argument types.
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:2669
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type *>::iterator &TI)
getExpandedTypes - Expand the type Ty, writing the expanded sequence of LLVM types through the iterator TI.
Definition: CGCall.cpp:978
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:903
bool isParamDestroyedInCallee() const
Definition: Decl.h:3723
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:445
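A control-flow sketch combining this with createBasicBlock from earlier in this index (CGF assumed):
// Sketch: make a fresh block the current insertion point.
llvm::BasicBlock *ContBB = CGF.createBasicBlock("cont");
CGF.EmitBlock(ContBB);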
Represents a base class of a C++ class.
Definition: DeclCXX.h:192
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2076
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:2023
ASTContext & getContext() const
Definition: CodeGenTypes.h:174
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:134
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:513
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl...
Definition: CGCall.cpp:1672
LangAS getAddressSpace() const
Definition: CGValue.h:314
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
RValue getKnownRValue() const
Definition: CGCall.h:244
Represents a C++ struct/union/class.
Definition: DeclCXX.h:300
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(const CXXMethodDecl *MD)
Get the type of the implicit "this" parameter used by a method.
Definition: CGCXXABI.h:338
bool isVoidType() const
Definition: Type.h:6497
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2214
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:6073
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
LValue EmitLValue(const Expr *