1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGBlocks.h"
18 #include "CGCXXABI.h"
19 #include "CGCleanup.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclCXX.h"
25 #include "clang/AST/DeclObjC.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "clang/CodeGen/SwiftCallingConv.h"
31 #include "llvm/ADT/StringExtras.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/CallSite.h"
36 #include "llvm/IR/CallingConv.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/InlineAsm.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Intrinsics.h"
41 using namespace clang;
42 using namespace CodeGen;
43 
44 /***/
45 
46 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
47  switch (CC) {
48  default: return llvm::CallingConv::C;
49  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53  case CC_Win64: return llvm::CallingConv::Win64;
54  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58  // TODO: Add support for __pascal to LLVM.
59  case CC_X86Pascal: return llvm::CallingConv::C;
60  // TODO: Add support for __vectorcall to LLVM.
61  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
63  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
64  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
65  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
66  case CC_Swift: return llvm::CallingConv::Swift;
67  }
68 }
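// Illustrative example (not from this file): a declaration such as
//
//   void f(int) __attribute__((preserve_most));
//
// reaches this switch with CC_PreserveMost, so the emitted llvm::Function
// carries llvm::CallingConv::PreserveMost.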
69 
70 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
71 /// qualification.
72 /// FIXME: address space qualification?
73 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
74  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
75  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
76 }
77 
78 /// Returns the canonical formal type of the given C++ method.
79 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
80  return MD->getType()->getCanonicalTypeUnqualified()
81  .getAs<FunctionProtoType>();
82 }
83 
84 /// Returns the "extra-canonicalized" return type, which discards
85 /// qualifiers on the return type. Codegen doesn't care about them,
86 /// and it makes ABI code a little easier to be able to assume that
87 /// all parameter and return types are top-level unqualified.
88 static CanQualType GetReturnType(QualType RetTy) {
89  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
90 }
91 
92 /// Arrange the argument and result information for a value of the given
93 /// unprototyped freestanding function type.
94 const CGFunctionInfo &
95 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
96  // When translating an unprototyped function type, always use a
97  // variadic type.
98  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
99  /*instanceMethod=*/false,
100  /*chainCall=*/false, None,
101  FTNP->getExtInfo(), {}, RequiredArgs(0));
102 }
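// Illustrative example (hypothetical C code, not from this file): a K&R
// declaration
//
//   int f();   /* FunctionNoProtoType */
//
// is arranged as a variadic signature with zero required arguments, so a
// call such as f(1, 2) can still pass its operands under the default
// argument promotions.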
103 
104 static void addExtParameterInfosForCall(
105  llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
106  const FunctionProtoType *proto,
107  unsigned prefixArgs,
108  unsigned totalArgs) {
109  assert(proto->hasExtParameterInfos());
110  assert(paramInfos.size() <= prefixArgs);
111  assert(proto->getNumParams() + prefixArgs <= totalArgs);
112 
113  paramInfos.reserve(totalArgs);
114 
115  // Add default infos for any prefix args that don't already have infos.
116  paramInfos.resize(prefixArgs);
117 
118  // Add infos for the prototype.
119  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
120  paramInfos.push_back(ParamInfo);
121  // pass_object_size params have no parameter info.
122  if (ParamInfo.hasPassObjectSize())
123  paramInfos.emplace_back();
124  }
125 
126  assert(paramInfos.size() <= totalArgs &&
127  "Did we forget to insert pass_object_size args?");
128  // Add default infos for the variadic and/or suffix arguments.
129  paramInfos.resize(totalArgs);
130 }
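// Illustrative example (hypothetical declaration, not from this file):
//
//   void g(void *p __attribute__((pass_object_size(0))));
//
// has one semantic parameter but two ABI-level arguments: the pointer plus
// its implicit size. The ExtParameterInfo for 'p' is therefore followed by
// a default-constructed entry covering the implicit size argument.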
131 
132 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
133 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
134 static void appendParameterTypes(const CodeGenTypes &CGT,
135  SmallVectorImpl<CanQualType> &prefix,
136  SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
137  CanQual<FunctionProtoType> FPT) {
138  // Fast path: don't touch param info if we don't need to.
139  if (!FPT->hasExtParameterInfos()) {
140  assert(paramInfos.empty() &&
141  "We have paramInfos, but the prototype doesn't?");
142  prefix.append(FPT->param_type_begin(), FPT->param_type_end());
143  return;
144  }
145 
146  unsigned PrefixSize = prefix.size();
147  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
148  // parameters; the only thing that can change this is the presence of
149  // pass_object_size. So, we preallocate for the common case.
150  prefix.reserve(prefix.size() + FPT->getNumParams());
151 
152  auto ExtInfos = FPT->getExtParameterInfos();
153  assert(ExtInfos.size() == FPT->getNumParams());
154  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
155  prefix.push_back(FPT->getParamType(I));
156  if (ExtInfos[I].hasPassObjectSize())
157  prefix.push_back(CGT.getContext().getSizeType());
158  }
159 
160  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
161  prefix.size());
162 }
163 
164 /// Arrange the LLVM function layout for a value of the given function
165 /// type, on top of any implicit parameters already stored.
166 static const CGFunctionInfo &
167 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
168  SmallVectorImpl<CanQualType> &prefix,
169  CanQual<FunctionProtoType> FTP,
170  const FunctionDecl *FD) {
171  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
172  RequiredArgs Required =
173  RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
174  // FIXME: Kill copy.
175  appendParameterTypes(CGT, prefix, paramInfos, FTP);
176  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
177 
178  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
179  /*chainCall=*/false, prefix,
180  FTP->getExtInfo(), paramInfos,
181  Required);
182 }
183 
184 /// Arrange the argument and result information for a value of the
185 /// given freestanding function type.
186 const CGFunctionInfo &
187 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
188  const FunctionDecl *FD) {
189  SmallVector<CanQualType, 16> argTypes;
190  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
191  FTP, FD);
192 }
193 
194 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
195  // Set the appropriate calling convention for the Function.
196  if (D->hasAttr<StdCallAttr>())
197  return CC_X86StdCall;
198 
199  if (D->hasAttr<FastCallAttr>())
200  return CC_X86FastCall;
201 
202  if (D->hasAttr<RegCallAttr>())
203  return CC_X86RegCall;
204 
205  if (D->hasAttr<ThisCallAttr>())
206  return CC_X86ThisCall;
207 
208  if (D->hasAttr<VectorCallAttr>())
209  return CC_X86VectorCall;
210 
211  if (D->hasAttr<PascalAttr>())
212  return CC_X86Pascal;
213 
214  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
215  return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
216 
217  if (D->hasAttr<IntelOclBiccAttr>())
218  return CC_IntelOclBicc;
219 
220  if (D->hasAttr<MSABIAttr>())
221  return IsWindows ? CC_C : CC_Win64;
222 
223  if (D->hasAttr<SysVABIAttr>())
224  return IsWindows ? CC_X86_64SysV : CC_C;
225 
226  if (D->hasAttr<PreserveMostAttr>())
227  return CC_PreserveMost;
228 
229  if (D->hasAttr<PreserveAllAttr>())
230  return CC_PreserveAll;
231 
232  return CC_C;
233 }
234 
235 /// Arrange the argument and result information for a call to an
236 /// unknown C++ non-static member function of the given abstract type.
237 /// (Zero value of RD means we don't have any meaningful "this" argument type,
238 /// so fall back to a generic pointer type).
239 /// The member function must be an ordinary function, i.e. not a
240 /// constructor or destructor.
241 const CGFunctionInfo &
242 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
243  const FunctionProtoType *FTP,
244  const CXXMethodDecl *MD) {
245  SmallVector<CanQualType, 16> argTypes;
246 
247  // Add the 'this' pointer.
248  if (RD)
249  argTypes.push_back(GetThisType(Context, RD));
250  else
251  argTypes.push_back(Context.VoidPtrTy);
252 
253  return ::arrangeLLVMFunctionInfo(
254  *this, true, argTypes,
255  FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
256 }
257 
258 /// Set calling convention for CUDA/HIP kernel.
259 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
260  const FunctionDecl *FD) {
261  if (FD->hasAttr<CUDAGlobalAttr>()) {
262  const FunctionType *FT = FTy->getAs<FunctionType>();
263  CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
264  FTy = FT->getCanonicalTypeUnqualified();
265  }
266 }
267 
268 /// Arrange the argument and result information for a declaration or
269 /// definition of the given C++ non-static member function. The
270 /// member function must be an ordinary function, i.e. not a
271 /// constructor or destructor.
272 const CGFunctionInfo &
273 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
274  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
275  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
276 
277  CanQualType FT = GetFormalType(MD).getAs<Type>();
278  setCUDAKernelCallingConvention(FT, CGM, MD);
279  auto prototype = FT.getAs<FunctionProtoType>();
280 
281  if (MD->isInstance()) {
282  // The abstract case is perfectly fine.
283  const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
284  return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
285  }
286 
287  return arrangeFreeFunctionType(prototype, MD);
288 }
289 
290 bool CodeGenTypes::inheritingCtorHasParams(
291  const InheritedConstructor &Inherited, CXXCtorType Type) {
292  // Parameters are unnecessary if we're constructing a base class subobject
293  // and the inherited constructor lives in a virtual base.
294  return Type == Ctor_Complete ||
295  !Inherited.getShadowDecl()->constructsVirtualBase() ||
296  !Target.getCXXABI().hasConstructorVariants();
297  }
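// Illustrative sketch (hypothetical C++, not from this file):
//
//   struct A { A(int); };
//   struct B : virtual A { using A::A; };
//
// The base-object variant of B's inheriting constructor does not construct
// the virtual base A (the complete-object variant does), so in an ABI with
// constructor variants it needs no forwarded parameters and this predicate
// returns false for Ctor_Base.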
298 
299 const CGFunctionInfo &
300 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
301  StructorType Type) {
302 
303  SmallVector<CanQualType, 16> argTypes;
304  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
305  argTypes.push_back(GetThisType(Context, MD->getParent()));
306 
307  bool PassParams = true;
308 
309  GlobalDecl GD;
310  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
311  GD = GlobalDecl(CD, toCXXCtorType(Type));
312 
313  // A base class inheriting constructor doesn't get forwarded arguments
314  // needed to construct a virtual base (or base class thereof).
315  if (auto Inherited = CD->getInheritedConstructor())
316  PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
317  } else {
318  auto *DD = dyn_cast<CXXDestructorDecl>(MD);
319  GD = GlobalDecl(DD, toCXXDtorType(Type));
320  }
321 
322  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
323 
324  // Add the formal parameters.
325  if (PassParams)
326  appendParameterTypes(*this, argTypes, paramInfos, FTP);
327 
328  CGCXXABI::AddedStructorArgs AddedArgs =
329  TheCXXABI.buildStructorSignature(MD, Type, argTypes);
330  if (!paramInfos.empty()) {
331  // Note: prefix implies after the first param.
332  if (AddedArgs.Prefix)
333  paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
334  FunctionProtoType::ExtParameterInfo{});
335  if (AddedArgs.Suffix)
336  paramInfos.append(AddedArgs.Suffix,
337  FunctionProtoType::ExtParameterInfo{});
338  }
339 
340  RequiredArgs required =
341  (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
342  : RequiredArgs::All);
343 
344  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
345  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
346  ? argTypes.front()
347  : TheCXXABI.hasMostDerivedReturn(GD)
348  ? CGM.getContext().VoidPtrTy
349  : Context.VoidTy;
350  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
351  /*chainCall=*/false, argTypes, extInfo,
352  paramInfos, required);
353 }
354 
355 static SmallVector<CanQualType, 16>
356 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
357  SmallVector<CanQualType, 16> argTypes;
358  for (auto &arg : args)
359  argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
360  return argTypes;
361 }
362 
363 static SmallVector<CanQualType, 16>
364 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
365  SmallVector<CanQualType, 16> argTypes;
366  for (auto &arg : args)
367  argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
368  return argTypes;
369 }
370 
371 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
372 getExtParameterInfosForCall(const FunctionProtoType *proto,
373  unsigned prefixArgs, unsigned totalArgs) {
374  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
375  if (proto->hasExtParameterInfos()) {
376  addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
377  }
378  return result;
379 }
380 
381 /// Arrange a call to a C++ method, passing the given arguments.
382 ///
383 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
384 /// parameter.
385 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
386 /// args.
387 /// PassProtoArgs indicates whether `args` has args for the parameters in the
388 /// given CXXConstructorDecl.
389 const CGFunctionInfo &
390 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
391  const CXXConstructorDecl *D,
392  CXXCtorType CtorKind,
393  unsigned ExtraPrefixArgs,
394  unsigned ExtraSuffixArgs,
395  bool PassProtoArgs) {
396  // FIXME: Kill copy.
397  SmallVector<CanQualType, 16> ArgTypes;
398  for (const auto &Arg : args)
399  ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
400 
401  // +1 for implicit this, which should always be args[0].
402  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
403 
404  CanQual<FunctionProtoType> FPT = GetFormalType(D);
405  RequiredArgs Required =
406  RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
407  GlobalDecl GD(D, CtorKind);
408  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
409  ? ArgTypes.front()
410  : TheCXXABI.hasMostDerivedReturn(GD)
411  ? CGM.getContext().VoidPtrTy
412  : Context.VoidTy;
413 
414  FunctionType::ExtInfo Info = FPT->getExtInfo();
415  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
416  // If the prototype args are elided, we should only have ABI-specific args,
417  // which never have param info.
418  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
419  // ABI-specific suffix arguments are treated the same as variadic arguments.
420  addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
421  ArgTypes.size());
422  }
423  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
424  /*chainCall=*/false, ArgTypes, Info,
425  ParamInfos, Required);
426 }
427 
428 /// Arrange the argument and result information for the declaration or
429 /// definition of the given function.
430 const CGFunctionInfo &
431 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
432  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
433  if (MD->isInstance())
434  return arrangeCXXMethodDeclaration(MD);
435 
436  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
437 
438  assert(isa<FunctionType>(FTy));
439  setCUDAKernelCallingConvention(FTy, CGM, FD);
440 
441  // When declaring a function without a prototype, always use a
442  // non-variadic type.
443  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
444  return arrangeLLVMFunctionInfo(
445  noProto->getReturnType(), /*instanceMethod=*/false,
446  /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
447  }
448 
449  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
450 }
451 
452 /// Arrange the argument and result information for the declaration or
453 /// definition of an Objective-C method.
454 const CGFunctionInfo &
455 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
456  // It happens that this is the same as a call with no optional
457  // arguments, except also using the formal 'self' type.
458  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
459 }
460 
461 /// Arrange the argument and result information for the function type
462 /// through which to perform a send to the given Objective-C method,
463 /// using the given receiver type. The receiver type is not always
464 /// the 'self' type of the method or even an Objective-C pointer type.
465 /// This is *not* the right method for actually performing such a
466 /// message send, due to the possibility of optional arguments.
467 const CGFunctionInfo &
468 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
469  QualType receiverType) {
470  SmallVector<CanQualType, 16> argTys;
471  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
472  argTys.push_back(Context.getCanonicalParamType(receiverType));
473  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
474  // FIXME: Kill copy?
475  for (const auto *I : MD->parameters()) {
476  argTys.push_back(Context.getCanonicalParamType(I->getType()));
477  auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
478  I->hasAttr<NoEscapeAttr>());
479  extParamInfos.push_back(extParamInfo);
480  }
481 
482  FunctionType::ExtInfo einfo;
483  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
484  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
485 
486  if (getContext().getLangOpts().ObjCAutoRefCount &&
487  MD->hasAttr<NSReturnsRetainedAttr>())
488  einfo = einfo.withProducesResult(true);
489 
490  RequiredArgs required =
491  (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
492 
493  return arrangeLLVMFunctionInfo(
494  GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
495  /*chainCall=*/false, argTys, einfo, extParamInfos, required);
496 }
497 
498 const CGFunctionInfo &
499 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
500  const CallArgList &args) {
501  auto argTypes = getArgTypesForCall(Context, args);
502  FunctionType::ExtInfo einfo;
503 
504  return arrangeLLVMFunctionInfo(
505  GetReturnType(returnType), /*instanceMethod=*/false,
506  /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
507 }
508 
509 const CGFunctionInfo &
510 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
511  // FIXME: Do we need to handle ObjCMethodDecl?
512  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
513 
514  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
515  return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
516 
517  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
518  return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
519 
520  return arrangeFunctionDeclaration(FD);
521 }
522 
523 /// Arrange a thunk that takes 'this' as the first parameter followed by
524 /// varargs. Return a void pointer, regardless of the actual return type.
525 /// The body of the thunk will end in a musttail call to a function of the
526 /// correct type, and the caller will bitcast the function to the correct
527 /// prototype.
528 const CGFunctionInfo &
529 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
530  assert(MD->isVirtual() && "only methods have thunks");
531  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
532  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
533  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
534  /*chainCall=*/false, ArgTys,
535  FTP->getExtInfo(), {}, RequiredArgs(1));
536 }
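// Illustrative sketch (not from this file): such a thunk is used when the
// target cannot simply be re-invoked with rewritten arguments, e.g. a
// variadic virtual method
//
//   struct S { virtual void f(int, ...); };
//
// The thunk takes only 'this' as a required argument, adjusts it, and
// forwards everything else through a musttail call bitcast to f's real
// prototype.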
537 
538 const CGFunctionInfo &
539 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
540  CXXCtorType CT) {
541  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
542 
543  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
544  SmallVector<CanQualType, 2> ArgTys;
545  const CXXRecordDecl *RD = CD->getParent();
546  ArgTys.push_back(GetThisType(Context, RD));
547  if (CT == Ctor_CopyingClosure)
548  ArgTys.push_back(*FTP->param_type_begin());
549  if (RD->getNumVBases() > 0)
550  ArgTys.push_back(Context.IntTy);
551  CallingConv CC = Context.getDefaultCallingConvention(
552  /*IsVariadic=*/false, /*IsCXXMethod=*/true);
553  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
554  /*chainCall=*/false, ArgTys,
555  FunctionType::ExtInfo(CC), {},
556  RequiredArgs::All);
557 }
558 
559 /// Arrange a call as unto a free function, except possibly with an
560 /// additional number of formal parameters considered required.
561 static const CGFunctionInfo &
562 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
563  CodeGenModule &CGM,
564  const CallArgList &args,
565  const FunctionType *fnType,
566  unsigned numExtraRequiredArgs,
567  bool chainCall) {
568  assert(args.size() >= numExtraRequiredArgs);
569 
570  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
571 
572  // In most cases, there are no optional arguments.
573  RequiredArgs required = RequiredArgs::All;
574 
575  // If we have a variadic prototype, the required arguments are the
576  // extra prefix plus the arguments in the prototype.
577  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
578  if (proto->isVariadic())
579  required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
580 
581  if (proto->hasExtParameterInfos())
582  addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
583  args.size());
584 
585  // If we don't have a prototype at all, but we're supposed to
586  // explicitly use the variadic convention for unprototyped calls,
587  // treat all of the arguments as required but preserve the nominal
588  // possibility of variadics.
589  } else if (CGM.getTargetCodeGenInfo()
590  .isNoProtoCallVariadic(args,
591  cast<FunctionNoProtoType>(fnType))) {
592  required = RequiredArgs(args.size());
593  }
594 
595  // FIXME: Kill copy.
596  SmallVector<CanQualType, 16> argTypes;
597  for (const auto &arg : args)
598  argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
599  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
600  /*instanceMethod=*/false, chainCall,
601  argTypes, fnType->getExtInfo(), paramInfos,
602  required);
603 }
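// Illustrative example (hypothetical call, not from this file): calling
//
//   int printf(const char *, ...);
//
// with three arguments and no extra required args yields
// required = RequiredArgs(1): only the format string is a formal
// parameter, the remaining two are variadic.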
604 
605 /// Figure out the rules for calling a function with the given formal
606 /// type using the given arguments. The arguments are necessary
607 /// because the function might be unprototyped, in which case it's
608 /// target-dependent in crazy ways.
609 const CGFunctionInfo &
610 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
611  const FunctionType *fnType,
612  bool chainCall) {
613  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
614  chainCall ? 1 : 0, chainCall);
615 }
616 
617 /// A block function is essentially a free function with an
618 /// extra implicit argument.
619 const CGFunctionInfo &
620 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
621  const FunctionType *fnType) {
622  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
623  /*chainCall=*/false);
624 }
625 
626 const CGFunctionInfo &
627 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
628  const FunctionArgList &params) {
629  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
630  auto argTypes = getArgTypesForDeclaration(Context, params);
631 
632  return arrangeLLVMFunctionInfo(
633  GetReturnType(proto->getReturnType()),
634  /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
635  proto->getExtInfo(), paramInfos,
636  RequiredArgs::forPrototypePlus(proto, 1, nullptr));
637 }
638 
639 const CGFunctionInfo &
640 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
641  const CallArgList &args) {
642  // FIXME: Kill copy.
643  SmallVector<CanQualType, 16> argTypes;
644  for (const auto &Arg : args)
645  argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
646  return arrangeLLVMFunctionInfo(
647  GetReturnType(resultType), /*instanceMethod=*/false,
648  /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
649  /*paramInfos=*/ {}, RequiredArgs::All);
650 }
651 
652 const CGFunctionInfo &
653 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
654  const FunctionArgList &args) {
655  auto argTypes = getArgTypesForDeclaration(Context, args);
656 
657  return arrangeLLVMFunctionInfo(
658  GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
659  argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
660 }
661 
662 const CGFunctionInfo &
663 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
664  ArrayRef<CanQualType> argTypes) {
665  return arrangeLLVMFunctionInfo(
666  resultType, /*instanceMethod=*/false, /*chainCall=*/false,
667  argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
668 }
669 
670 /// Arrange a call to a C++ method, passing the given arguments.
671 ///
672 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
673 /// does not count `this`.
674 const CGFunctionInfo &
675 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
676  const FunctionProtoType *proto,
677  RequiredArgs required,
678  unsigned numPrefixArgs) {
679  assert(numPrefixArgs + 1 <= args.size() &&
680  "Emitting a call with less args than the required prefix?");
681  // Add one to account for `this`. It's a bit awkward here, but we don't count
682  // `this` in similar places elsewhere.
683  auto paramInfos =
684  getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
685 
686  // FIXME: Kill copy.
687  auto argTypes = getArgTypesForCall(Context, args);
688 
689  FunctionType::ExtInfo info = proto->getExtInfo();
690  return arrangeLLVMFunctionInfo(
691  GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
692  /*chainCall=*/false, argTypes, info, paramInfos, required);
693 }
694 
695 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
696  return arrangeLLVMFunctionInfo(
697  getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
698  None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
699 }
700 
701 const CGFunctionInfo &
702 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
703  const CallArgList &args) {
704  assert(signature.arg_size() <= args.size());
705  if (signature.arg_size() == args.size())
706  return signature;
707 
708  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
709  auto sigParamInfos = signature.getExtParameterInfos();
710  if (!sigParamInfos.empty()) {
711  paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
712  paramInfos.resize(args.size());
713  }
714 
715  auto argTypes = getArgTypesForCall(Context, args);
716 
717  assert(signature.getRequiredArgs().allowsOptionalArgs());
718  return arrangeLLVMFunctionInfo(signature.getReturnType(),
719  signature.isInstanceMethod(),
720  signature.isChainCall(),
721  argTypes,
722  signature.getExtInfo(),
723  paramInfos,
724  signature.getRequiredArgs());
725 }
726 
727 namespace clang {
728 namespace CodeGen {
729 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
730 }
731 }
732 
733 /// Arrange the argument and result information for an abstract value
734 /// of a given function type. This is the method which all of the
735 /// above functions ultimately defer to.
736 const CGFunctionInfo &
737 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
738  bool instanceMethod,
739  bool chainCall,
740  ArrayRef<CanQualType> argTypes,
741  FunctionType::ExtInfo info,
742  ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
743  RequiredArgs required) {
744  assert(std::all_of(argTypes.begin(), argTypes.end(),
745  [](CanQualType T) { return T.isCanonicalAsParam(); }));
746 
747  // Lookup or create unique function info.
748  llvm::FoldingSetNodeID ID;
749  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
750  required, resultType, argTypes);
751 
752  void *insertPos = nullptr;
753  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
754  if (FI)
755  return *FI;
756 
757  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
758 
759  // Construct the function info. We co-allocate the ArgInfos.
760  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
761  paramInfos, resultType, argTypes, required);
762  FunctionInfos.InsertNode(FI, insertPos);
763 
764  bool inserted = FunctionsBeingProcessed.insert(FI).second;
765  (void)inserted;
766  assert(inserted && "Recursively being processed?");
767 
768  // Compute ABI information.
769  if (CC == llvm::CallingConv::SPIR_KERNEL) {
770  // Force target independent argument handling for the host visible
771  // kernel functions.
772  computeSPIRKernelABIInfo(CGM, *FI);
773  } else if (info.getCC() == CC_Swift) {
774  swiftcall::computeABIInfo(CGM, *FI);
775  } else {
776  getABIInfo().computeInfo(*FI);
777  }
778 
779  // Loop over all of the computed argument and return value info. If any of
780  // them are direct or extend without a specified coerce type, specify the
781  // default now.
782  ABIArgInfo &retInfo = FI->getReturnInfo();
783  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
784  retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
785 
786  for (auto &I : FI->arguments())
787  if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
788  I.info.setCoerceToType(ConvertType(I.type));
789 
790  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
791  assert(erased && "Not in set?");
792 
793  return *FI;
794 }
795 
796 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
797  bool instanceMethod,
798  bool chainCall,
799  const FunctionType::ExtInfo &info,
800  ArrayRef<ExtParameterInfo> paramInfos,
801  CanQualType resultType,
802  ArrayRef<CanQualType> argTypes,
803  RequiredArgs required) {
804  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
805 
806  void *buffer =
807  operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
808  argTypes.size() + 1, paramInfos.size()));
809 
810  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
811  FI->CallingConvention = llvmCC;
812  FI->EffectiveCallingConvention = llvmCC;
813  FI->ASTCallingConvention = info.getCC();
814  FI->InstanceMethod = instanceMethod;
815  FI->ChainCall = chainCall;
816  FI->NoReturn = info.getNoReturn();
817  FI->ReturnsRetained = info.getProducesResult();
818  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
819  FI->NoCfCheck = info.getNoCfCheck();
820  FI->Required = required;
821  FI->HasRegParm = info.getHasRegParm();
822  FI->RegParm = info.getRegParm();
823  FI->ArgStruct = nullptr;
824  FI->ArgStructAlign = 0;
825  FI->NumArgs = argTypes.size();
826  FI->HasExtParameterInfos = !paramInfos.empty();
827  FI->getArgsBuffer()[0].type = resultType;
828  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
829  FI->getArgsBuffer()[i + 1].type = argTypes[i];
830  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
831  FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
832  return FI;
833 }
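// Illustrative layout sketch (simplified, not from this file): the single
// allocation made above places the trailing arrays directly after the
// object, with the return type stored as getArgsBuffer()[0]:
//
//   [CGFunctionInfo][ArgInfo ret][ArgInfo arg0]...[ArgInfo argN-1][ExtParameterInfo...]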
834 
835 /***/
836 
837 namespace {
838 // ABIArgInfo::Expand implementation.
839 
840 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
841 struct TypeExpansion {
842  enum TypeExpansionKind {
843  // Elements of constant arrays are expanded recursively.
844  TEK_ConstantArray,
845  // Record fields are expanded recursively (but if record is a union, only
846  // the field with the largest size is expanded).
847  TEK_Record,
848  // For complex types, real and imaginary parts are expanded recursively.
849  TEK_Complex,
850  // All other types are not expandable.
851  TEK_None
852  };
853 
854  const TypeExpansionKind Kind;
855 
856  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
857  virtual ~TypeExpansion() {}
858 };
859 
860 struct ConstantArrayExpansion : TypeExpansion {
861  QualType EltTy;
862  uint64_t NumElts;
863 
864  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
865  : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
866  static bool classof(const TypeExpansion *TE) {
867  return TE->Kind == TEK_ConstantArray;
868  }
869 };
870 
871 struct RecordExpansion : TypeExpansion {
872  SmallVector<const CXXBaseSpecifier *, 1> Bases;
873 
874  SmallVector<const FieldDecl *, 1> Fields;
875 
876  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
877  SmallVector<const FieldDecl *, 1> &&Fields)
878  : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
879  Fields(std::move(Fields)) {}
880  static bool classof(const TypeExpansion *TE) {
881  return TE->Kind == TEK_Record;
882  }
883 };
884 
885 struct ComplexExpansion : TypeExpansion {
886  QualType EltTy;
887 
888  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
889  static bool classof(const TypeExpansion *TE) {
890  return TE->Kind == TEK_Complex;
891  }
892 };
893 
894 struct NoExpansion : TypeExpansion {
895  NoExpansion() : TypeExpansion(TEK_None) {}
896  static bool classof(const TypeExpansion *TE) {
897  return TE->Kind == TEK_None;
898  }
899 };
900 } // namespace
901 
902 static std::unique_ptr<TypeExpansion>
903 getTypeExpansion(QualType Ty, const ASTContext &Context) {
904  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
905  return llvm::make_unique<ConstantArrayExpansion>(
906  AT->getElementType(), AT->getSize().getZExtValue());
907  }
908  if (const RecordType *RT = Ty->getAs<RecordType>()) {
909  SmallVector<const CXXBaseSpecifier *, 1> Bases;
910  SmallVector<const FieldDecl *, 1> Fields;
911  const RecordDecl *RD = RT->getDecl();
912  assert(!RD->hasFlexibleArrayMember() &&
913  "Cannot expand structure with flexible array.");
914  if (RD->isUnion()) {
915  // Unions can be here only in degenerate cases - all the fields are the
916  // same after flattening. Thus we have to use the "largest" field.
917  const FieldDecl *LargestFD = nullptr;
918  CharUnits UnionSize = CharUnits::Zero();
919 
920  for (const auto *FD : RD->fields()) {
921  if (FD->isZeroLengthBitField(Context))
922  continue;
923  assert(!FD->isBitField() &&
924  "Cannot expand structure with bit-field members.");
925  CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
926  if (UnionSize < FieldSize) {
927  UnionSize = FieldSize;
928  LargestFD = FD;
929  }
930  }
931  if (LargestFD)
932  Fields.push_back(LargestFD);
933  } else {
934  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
935  assert(!CXXRD->isDynamicClass() &&
936  "cannot expand vtable pointers in dynamic classes");
937  for (const CXXBaseSpecifier &BS : CXXRD->bases())
938  Bases.push_back(&BS);
939  }
940 
941  for (const auto *FD : RD->fields()) {
942  if (FD->isZeroLengthBitField(Context))
943  continue;
944  assert(!FD->isBitField() &&
945  "Cannot expand structure with bit-field members.");
946  Fields.push_back(FD);
947  }
948  }
949  return llvm::make_unique<RecordExpansion>(std::move(Bases),
950  std::move(Fields));
951  }
952  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
953  return llvm::make_unique<ComplexExpansion>(CT->getElementType());
954  }
955  return llvm::make_unique<NoExpansion>();
956 }
957 
958 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
959  auto Exp = getTypeExpansion(Ty, Context);
960  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
961  return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
962  }
963  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
964  int Res = 0;
965  for (auto BS : RExp->Bases)
966  Res += getExpansionSize(BS->getType(), Context);
967  for (auto FD : RExp->Fields)
968  Res += getExpansionSize(FD->getType(), Context);
969  return Res;
970  }
971  if (isa<ComplexExpansion>(Exp.get()))
972  return 2;
973  assert(isa<NoExpansion>(Exp.get()));
974  return 1;
975 }
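// Illustrative example (hypothetical type, not from this file):
//
//   struct P { int xy[2]; _Complex float c; };
//
// expands to 2 (constant array of int) + 2 (real and imaginary parts), so
// getExpansionSize returns 4.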
976 
977 void
978 CodeGenTypes::getExpandedTypes(QualType Ty,
979  SmallVectorImpl<llvm::Type *>::iterator &TI) {
980  auto Exp = getTypeExpansion(Ty, Context);
981  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
982  for (int i = 0, n = CAExp->NumElts; i < n; i++) {
983  getExpandedTypes(CAExp->EltTy, TI);
984  }
985  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
986  for (auto BS : RExp->Bases)
987  getExpandedTypes(BS->getType(), TI);
988  for (auto FD : RExp->Fields)
989  getExpandedTypes(FD->getType(), TI);
990  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
991  llvm::Type *EltTy = ConvertType(CExp->EltTy);
992  *TI++ = EltTy;
993  *TI++ = EltTy;
994  } else {
995  assert(isa<NoExpansion>(Exp.get()));
996  *TI++ = ConvertType(Ty);
997  }
998 }
999 
1000 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1001  ConstantArrayExpansion *CAE,
1002  Address BaseAddr,
1003  llvm::function_ref<void(Address)> Fn) {
1004  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1005  CharUnits EltAlign =
1006  BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1007 
1008  for (int i = 0, n = CAE->NumElts; i < n; i++) {
1009  llvm::Value *EltAddr =
1010  CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1011  Fn(Address(EltAddr, EltAlign));
1012  }
1013 }
1014 
1015 void CodeGenFunction::ExpandTypeFromArgs(
1016  QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1017  assert(LV.isSimple() &&
1018  "Unexpected non-simple lvalue during struct expansion.");
1019 
1020  auto Exp = getTypeExpansion(Ty, getContext());
1021  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1022  forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1023  [&](Address EltAddr) {
1024  LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1025  ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1026  });
1027  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1028  Address This = LV.getAddress();
1029  for (const CXXBaseSpecifier *BS : RExp->Bases) {
1030  // Perform a single step derived-to-base conversion.
1031  Address Base =
1032  GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1033  /*NullCheckValue=*/false, SourceLocation());
1034  LValue SubLV = MakeAddrLValue(Base, BS->getType());
1035 
1036  // Recurse onto bases.
1037  ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1038  }
1039  for (auto FD : RExp->Fields) {
1040  // FIXME: What are the right qualifiers here?
1041  LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1042  ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1043  }
1044  } else if (isa<ComplexExpansion>(Exp.get())) {
1045  auto realValue = *AI++;
1046  auto imagValue = *AI++;
1047  EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1048  } else {
1049  assert(isa<NoExpansion>(Exp.get()));
1050  EmitStoreThroughLValue(RValue::get(*AI++), LV);
1051  }
1052 }
1053 
1054 void CodeGenFunction::ExpandTypeToArgs(
1055  QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1056  SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1057  auto Exp = getTypeExpansion(Ty, getContext());
1058  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1059  Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1060  : Arg.getKnownRValue().getAggregateAddress();
1061  forConstantArrayExpansion(
1062  *this, CAExp, Addr, [&](Address EltAddr) {
1063  CallArg EltArg = CallArg(
1064  convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1065  CAExp->EltTy);
1066  ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1067  IRCallArgPos);
1068  });
1069  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1070  Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1071  : Arg.getKnownRValue().getAggregateAddress();
1072  for (const CXXBaseSpecifier *BS : RExp->Bases) {
1073  // Perform a single step derived-to-base conversion.
1074  Address Base =
1075  GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1076  /*NullCheckValue=*/false, SourceLocation());
1077  CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1078 
1079  // Recurse onto bases.
1080  ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1081  IRCallArgPos);
1082  }
1083 
1084  LValue LV = MakeAddrLValue(This, Ty);
1085  for (auto FD : RExp->Fields) {
1086  CallArg FldArg =
1087  CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1088  ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1089  IRCallArgPos);
1090  }
1091  } else if (isa<ComplexExpansion>(Exp.get())) {
1092  ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1093  IRCallArgs[IRCallArgPos++] = CV.first;
1094  IRCallArgs[IRCallArgPos++] = CV.second;
1095  } else {
1096  assert(isa<NoExpansion>(Exp.get()));
1097  auto RV = Arg.getKnownRValue();
1098  assert(RV.isScalar() &&
1099  "Unexpected non-scalar rvalue during struct expansion.");
1100 
1101  // Insert a bitcast as needed.
1102  llvm::Value *V = RV.getScalarVal();
1103  if (IRCallArgPos < IRFuncTy->getNumParams() &&
1104  V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1105  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1106 
1107  IRCallArgs[IRCallArgPos++] = V;
1108  }
1109 }
1110 
1111 /// Create a temporary allocation for the purposes of coercion.
1112 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1113  CharUnits MinAlign) {
1114  // Don't use an alignment that's worse than what LLVM would prefer.
1115  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1116  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1117 
1118  return CGF.CreateTempAlloca(Ty, Align);
1119 }
1120 
1121 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1122 /// accessing some number of bytes out of it, try to gep into the struct to get
1123 /// at its inner goodness. Dive as deep as possible without entering an element
1124 /// with an in-memory size smaller than DstSize.
1125 static Address
1126 EnterStructPointerForCoercedAccess(Address SrcPtr,
1127  llvm::StructType *SrcSTy,
1128  uint64_t DstSize, CodeGenFunction &CGF) {
1129  // We can't dive into a zero-element struct.
1130  if (SrcSTy->getNumElements() == 0) return SrcPtr;
1131 
1132  llvm::Type *FirstElt = SrcSTy->getElementType(0);
1133 
1134  // If the first elt is at least as large as what we're looking for, or if the
1135  // first element is the same size as the whole struct, we can enter it. The
1136  // comparison must be made on the store size and not the alloca size. Using
1137  // the alloca size may overstate the size of the load.
1138  uint64_t FirstEltSize =
1139  CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1140  if (FirstEltSize < DstSize &&
1141  FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1142  return SrcPtr;
1143 
1144  // GEP into the first element.
1145  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
1146 
1147  // If the first element is a struct, recurse.
1148  llvm::Type *SrcTy = SrcPtr.getElementType();
1149  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1150  return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1151 
1152  return SrcPtr;
1153 }
1154 
1155 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1156 /// are either integers or pointers. This does a truncation of the value if it
1157 /// is too large or a zero extension if it is too small.
1158 ///
1159 /// This behaves as if the value were coerced through memory, so on big-endian
1160 /// targets the high bits are preserved in a truncation, while little-endian
1161 /// targets preserve the low bits.
1162 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1163  llvm::Type *Ty,
1164  CodeGenFunction &CGF) {
1165  if (Val->getType() == Ty)
1166  return Val;
1167 
1168  if (isa<llvm::PointerType>(Val->getType())) {
1169  // If this is Pointer->Pointer avoid conversion to and from int.
1170  if (isa<llvm::PointerType>(Ty))
1171  return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1172 
1173  // Convert the pointer to an integer so we can play with its width.
1174  Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1175  }
1176 
1177  llvm::Type *DestIntTy = Ty;
1178  if (isa<llvm::PointerType>(DestIntTy))
1179  DestIntTy = CGF.IntPtrTy;
1180 
1181  if (Val->getType() != DestIntTy) {
1182  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1183  if (DL.isBigEndian()) {
1184  // Preserve the high bits on big-endian targets.
1185  // That is what memory coercion does.
1186  uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1187  uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1188 
1189  if (SrcSize > DstSize) {
1190  Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1191  Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1192  } else {
1193  Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1194  Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1195  }
1196  } else {
1197  // Little-endian targets preserve the low bits. No shifts required.
1198  Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1199  }
1200  }
1201 
1202  if (isa<llvm::PointerType>(Ty))
1203  Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1204  return Val;
1205 }
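// Illustrative example (assumed i64 -> i32 coercion, not from this file):
// on a big-endian target the emitted sequence is
//
//   %coerce.highbits = lshr i64 %val, 32
//   %coerce.val.ii   = trunc i64 %coerce.highbits to i32
//
// keeping the high bits, exactly as a store/load through memory would; a
// little-endian target truncates directly and keeps the low bits.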
1206 
1207 
1208 
1209 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1210 /// a pointer to an object of type \arg Ty, known to be aligned to
1211 /// \arg SrcAlign bytes.
1212 ///
1213 /// This safely handles the case when the src type is smaller than the
1214 /// destination type; in this situation the values of bits which are not
1215 /// present in the src are undefined.
1216 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1217  CodeGenFunction &CGF) {
1218  llvm::Type *SrcTy = Src.getElementType();
1219 
1220  // If SrcTy and Ty are the same, just do a load.
1221  if (SrcTy == Ty)
1222  return CGF.Builder.CreateLoad(Src);
1223 
1224  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1225 
1226  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1227  Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1228  SrcTy = Src.getType()->getElementType();
1229  }
1230 
1231  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1232 
1233  // If the source and destination are integer or pointer types, just do an
1234  // extension or truncation to the desired type.
1235  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1236  (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1237  llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1238  return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1239  }
1240 
1241  // If load is legal, just bitcast the src pointer.
1242  if (SrcSize >= DstSize) {
1243  // Generally SrcSize is never greater than DstSize, since this means we are
1244  // losing bits. However, this can happen in cases where the structure has
1245  // additional padding, for example due to a user specified alignment.
1246  //
1247  // FIXME: Assert that we aren't truncating non-padding bits when we have
1248  // access to that information.
1249  Src = CGF.Builder.CreateBitCast(Src,
1250  Ty->getPointerTo(Src.getAddressSpace()));
1251  return CGF.Builder.CreateLoad(Src);
1252  }
1253 
1254  // Otherwise do coercion through memory. This is stupid, but simple.
1255  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1256  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
1257  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.AllocaInt8PtrTy);
1258  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1259  llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1260  false);
1261  return CGF.Builder.CreateLoad(Tmp);
1262 }
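// Illustrative example (hypothetical types, not from this file): loading a
// { i32, i32 } temporary as i64 takes the bitcast-and-load path above
// (SrcSize >= DstSize), while loading it as i128 falls back to the memcpy
// through a temporary, leaving the bits not covered by the source
// undefined.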
1263 
1264 // Function to store a first-class aggregate into memory. We prefer to
1265 // store the elements rather than the aggregate to be more friendly to
1266 // fast-isel.
1267 // FIXME: Do we need to recurse here?
1268 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1269  Address Dest, bool DestIsVolatile) {
1270  // Prefer scalar stores to first-class aggregate stores.
1271  if (llvm::StructType *STy =
1272  dyn_cast<llvm::StructType>(Val->getType())) {
1273  const llvm::StructLayout *Layout =
1274  CGF.CGM.getDataLayout().getStructLayout(STy);
1275 
1276  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1277  auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
1278  Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
1279  llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1280  CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1281  }
1282  } else {
1283  CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1284  }
1285 }
1286 
1287 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1288 /// where the source and destination may have different types. The
1289 /// destination is known to be aligned to \arg DstAlign bytes.
1290 ///
1291 /// This safely handles the case when the src type is larger than the
1292 /// destination type; the upper bits of the src will be lost.
1293 static void CreateCoercedStore(llvm::Value *Src,
1294  Address Dst,
1295  bool DstIsVolatile,
1296  CodeGenFunction &CGF) {
1297  llvm::Type *SrcTy = Src->getType();
1298  llvm::Type *DstTy = Dst.getType()->getElementType();
1299  if (SrcTy == DstTy) {
1300  CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1301  return;
1302  }
1303 
1304  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1305 
1306  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1307  Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1308  DstTy = Dst.getType()->getElementType();
1309  }
1310 
1311  // If the source and destination are integer or pointer types, just do an
1312  // extension or truncation to the desired type.
1313  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1314  (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1315  Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1316  CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1317  return;
1318  }
1319 
1320  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1321 
1322  // If store is legal, just bitcast the src pointer.
1323  if (SrcSize <= DstSize) {
1324  Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1325  BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1326  } else {
1327  // Otherwise do coercion through memory. This is stupid, but
1328  // simple.
1329 
1330  // Generally SrcSize is never greater than DstSize, since this means we are
1331  // losing bits. However, this can happen in cases where the structure has
1332  // additional padding, for example due to a user specified alignment.
1333  //
1334  // FIXME: Assert that we aren't truncating non-padding bits when we have
1335  // access to that information.
1336  Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1337  CGF.Builder.CreateStore(Src, Tmp);
1338  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
1339  Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.AllocaInt8PtrTy);
1340  CGF.Builder.CreateMemCpy(DstCasted, Casted,
1341  llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1342  false);
1343  }
1344 }
1345 
1346 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1347  const ABIArgInfo &info) {
1348  if (unsigned offset = info.getDirectOffset()) {
1349  addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1350  addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1351  CharUnits::fromQuantity(offset));
1352  addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1353  }
1354  return addr;
1355 }
1356 
1357 namespace {
1358 
1359 /// Encapsulates information about the way function arguments from
1360 /// CGFunctionInfo should be passed to the actual LLVM IR function.
1361 class ClangToLLVMArgMapping {
1362  static const unsigned InvalidIndex = ~0U;
1363  unsigned InallocaArgNo;
1364  unsigned SRetArgNo;
1365  unsigned TotalIRArgs;
1366 
1367  /// Arguments of LLVM IR function corresponding to single Clang argument.
1368  struct IRArgs {
1369  unsigned PaddingArgIndex;
1370  // Argument is expanded to IR arguments at positions
1371  // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1372  unsigned FirstArgIndex;
1373  unsigned NumberOfArgs;
1374 
1375  IRArgs()
1376  : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1377  NumberOfArgs(0) {}
1378  };
1379 
1380  SmallVector<IRArgs, 8> ArgInfo;
1381 
1382 public:
1383  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1384  bool OnlyRequiredArgs = false)
1385  : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1386  ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1387  construct(Context, FI, OnlyRequiredArgs);
1388  }
1389 
1390  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1391  unsigned getInallocaArgNo() const {
1392  assert(hasInallocaArg());
1393  return InallocaArgNo;
1394  }
1395 
1396  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1397  unsigned getSRetArgNo() const {
1398  assert(hasSRetArg());
1399  return SRetArgNo;
1400  }
1401 
1402  unsigned totalIRArgs() const { return TotalIRArgs; }
1403 
1404  bool hasPaddingArg(unsigned ArgNo) const {
1405  assert(ArgNo < ArgInfo.size());
1406  return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1407  }
1408  unsigned getPaddingArgNo(unsigned ArgNo) const {
1409  assert(hasPaddingArg(ArgNo));
1410  return ArgInfo[ArgNo].PaddingArgIndex;
1411  }
1412 
1413  /// Returns the index of the first IR argument corresponding to ArgNo, and
1414  /// the number of such arguments.
1415  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1416  assert(ArgNo < ArgInfo.size());
1417  return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1418  ArgInfo[ArgNo].NumberOfArgs);
1419  }
1420 
1421 private:
1422  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1423  bool OnlyRequiredArgs);
1424 };
1425 
1426 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1427  const CGFunctionInfo &FI,
1428  bool OnlyRequiredArgs) {
1429  unsigned IRArgNo = 0;
1430  bool SwapThisWithSRet = false;
1431  const ABIArgInfo &RetAI = FI.getReturnInfo();
1432 
1433  if (RetAI.getKind() == ABIArgInfo::Indirect) {
1434  SwapThisWithSRet = RetAI.isSRetAfterThis();
1435  SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1436  }
1437 
1438  unsigned ArgNo = 0;
1439  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1440  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1441  ++I, ++ArgNo) {
1442  assert(I != FI.arg_end());
1443  QualType ArgType = I->type;
1444  const ABIArgInfo &AI = I->info;
1445  // Collect data about IR arguments corresponding to Clang argument ArgNo.
1446  auto &IRArgs = ArgInfo[ArgNo];
1447 
1448  if (AI.getPaddingType())
1449  IRArgs.PaddingArgIndex = IRArgNo++;
1450 
1451  switch (AI.getKind()) {
1452  case ABIArgInfo::Extend:
1453  case ABIArgInfo::Direct: {
1454  // FIXME: handle sseregparm someday...
1455  llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1456  if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1457  IRArgs.NumberOfArgs = STy->getNumElements();
1458  } else {
1459  IRArgs.NumberOfArgs = 1;
1460  }
1461  break;
1462  }
1463  case ABIArgInfo::Indirect:
1464  IRArgs.NumberOfArgs = 1;
1465  break;
1466  case ABIArgInfo::Ignore:
1467  case ABIArgInfo::InAlloca:
1468  // ignore and inalloca don't have matching LLVM parameters.
1469  IRArgs.NumberOfArgs = 0;
1470  break;
1472  IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1473  break;
1474  case ABIArgInfo::Expand:
1475  IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1476  break;
1477  }
1478 
1479  if (IRArgs.NumberOfArgs > 0) {
1480  IRArgs.FirstArgIndex = IRArgNo;
1481  IRArgNo += IRArgs.NumberOfArgs;
1482  }
1483 
1484  // Skip over the sret parameter when it comes second. We already handled it
1485  // above.
1486  if (IRArgNo == 1 && SwapThisWithSRet)
1487  IRArgNo++;
1488  }
1489  assert(ArgNo == ArgInfo.size());
1490 
1491  if (FI.usesInAlloca())
1492  InallocaArgNo = IRArgNo++;
1493 
1494  TotalIRArgs = IRArgNo;
1495 }
1496 } // namespace
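// Illustrative example (hypothetical signature, not from this file): for
//
//   struct S { float x, y; };
//   void f(struct S s, int n);
//
// with 's' given ABIArgInfo::Expand, the mapping is s -> IR args [0, 2)
// and n -> IR arg 2, so getIRArgs(0) yields {0, 2} and getIRArgs(1)
// yields {2, 1}.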
1497 
1498 /***/
1499 
1500 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1501  const auto &RI = FI.getReturnInfo();
1502  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1503 }
1504 
1505 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1506  return ReturnTypeUsesSRet(FI) &&
1507  getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1508 }
1509 
1510 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1511  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1512  switch (BT->getKind()) {
1513  default:
1514  return false;
1515  case BuiltinType::Float:
1516  return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1517  case BuiltinType::Double:
1518  return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1519  case BuiltinType::LongDouble:
1520  return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1521  }
1522  }
1523 
1524  return false;
1525 }
1526 
1527 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1528  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1529  if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1530  if (BT->getKind() == BuiltinType::LongDouble)
1531  return getTarget().useObjCFP2RetForComplexLongDouble();
1532  }
1533  }
1534 
1535  return false;
1536 }
1537 
1538 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1539  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1540  return GetFunctionType(FI);
1541 }
1542 
1543 llvm::FunctionType *
1544 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1545 
1546  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1547  (void)Inserted;
1548  assert(Inserted && "Recursively being processed?");
1549 
1550  llvm::Type *resultType = nullptr;
1551  const ABIArgInfo &retAI = FI.getReturnInfo();
1552  switch (retAI.getKind()) {
1553  case ABIArgInfo::Expand:
1554  llvm_unreachable("Invalid ABI kind for return argument");
1555 
1556  case ABIArgInfo::Extend:
1557  case ABIArgInfo::Direct:
1558  resultType = retAI.getCoerceToType();
1559  break;
1560 
1561  case ABIArgInfo::InAlloca:
1562  if (retAI.getInAllocaSRet()) {
1563  // sret things on win32 aren't void, they return the sret pointer.
1564  QualType ret = FI.getReturnType();
1565  llvm::Type *ty = ConvertType(ret);
1566  unsigned addressSpace = Context.getTargetAddressSpace(ret);
1567  resultType = llvm::PointerType::get(ty, addressSpace);
1568  } else {
1569  resultType = llvm::Type::getVoidTy(getLLVMContext());
1570  }
1571  break;
1572 
1573  case ABIArgInfo::Indirect:
1574  case ABIArgInfo::Ignore:
1575  resultType = llvm::Type::getVoidTy(getLLVMContext());
1576  break;
1577 
1578  case ABIArgInfo::CoerceAndExpand:
1579  resultType = retAI.getUnpaddedCoerceAndExpandType();
1580  break;
1581  }
1582 
1583  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1584  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1585 
1586  // Add type for sret argument.
1587  if (IRFunctionArgs.hasSRetArg()) {
1588  QualType Ret = FI.getReturnType();
1589  llvm::Type *Ty = ConvertType(Ret);
1590  unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1591  ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1592  llvm::PointerType::get(Ty, AddressSpace);
1593  }
1594 
1595  // Add type for inalloca argument.
1596  if (IRFunctionArgs.hasInallocaArg()) {
1597  auto ArgStruct = FI.getArgStruct();
1598  assert(ArgStruct);
1599  ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1600  }
1601 
1602  // Add in all of the required arguments.
1603  unsigned ArgNo = 0;
1604  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1605  ie = it + FI.getNumRequiredArgs();
1606  for (; it != ie; ++it, ++ArgNo) {
1607  const ABIArgInfo &ArgInfo = it->info;
1608 
1609  // Insert a padding type to ensure proper alignment.
1610  if (IRFunctionArgs.hasPaddingArg(ArgNo))
1611  ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1612  ArgInfo.getPaddingType();
1613 
1614  unsigned FirstIRArg, NumIRArgs;
1615  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1616 
1617  switch (ArgInfo.getKind()) {
1618  case ABIArgInfo::Ignore:
1619  case ABIArgInfo::InAlloca:
1620  assert(NumIRArgs == 0);
1621  break;
1622 
1623  case ABIArgInfo::Indirect: {
1624  assert(NumIRArgs == 1);
1625  // indirect arguments are always on the stack, which is alloca addr space.
1626  llvm::Type *LTy = ConvertTypeForMem(it->type);
1627  ArgTypes[FirstIRArg] = LTy->getPointerTo(
1628  CGM.getDataLayout().getAllocaAddrSpace());
1629  break;
1630  }
1631 
1632  case ABIArgInfo::Extend:
1633  case ABIArgInfo::Direct: {
1634  // Fast-isel and the optimizer generally like scalar values better than
1635  // FCAs, so we flatten them if this is safe to do for this argument.
1636  llvm::Type *argType = ArgInfo.getCoerceToType();
1637  llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1638  if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1639  assert(NumIRArgs == st->getNumElements());
1640  for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1641  ArgTypes[FirstIRArg + i] = st->getElementType(i);
1642  } else {
1643  assert(NumIRArgs == 1);
1644  ArgTypes[FirstIRArg] = argType;
1645  }
1646  break;
1647  }
1648 
1649  case ABIArgInfo::CoerceAndExpand: {
1650  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1651  for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1652  *ArgTypesIter++ = EltTy;
1653  }
1654  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1655  break;
1656  }
1657 
1658  case ABIArgInfo::Expand:
1659  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1660  getExpandedTypes(it->type, ArgTypesIter);
1661  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1662  break;
1663  }
1664  }
1665 
1666  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1667  assert(Erased && "Not in set?");
1668 
1669  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1670 }
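// Editorial sketch, not part of CGCall.cpp: the flattening done above for a
// Direct argument whose coercion type is a struct, expressed with the raw
// LLVM API. 'exampleFlattenedType' and 'Ctx' are hypothetical names.
static llvm::FunctionType *exampleFlattenedType(llvm::LLVMContext &Ctx) {
  llvm::Type *D = llvm::Type::getDoubleTy(Ctx);
  llvm::StructType *Coerced = llvm::StructType::get(Ctx, {D, D});
  llvm::SmallVector<llvm::Type *, 4> Params;
  for (llvm::Type *Elt : Coerced->elements())
    Params.push_back(Elt); // one IR parameter per flattened element
  // void (double, double) rather than void ({ double, double })
  return llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), Params,
                                 /*isVarArg=*/false);
}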
1671 
1672 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1673  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1674  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1675 
1676  if (!isFuncTypeConvertible(FPT))
1677  return llvm::StructType::get(getLLVMContext());
1678 
1679  const CGFunctionInfo *Info;
1680  if (isa<CXXDestructorDecl>(MD))
1681  Info =
1682  &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1683  else
1684  Info = &arrangeCXXMethodDeclaration(MD);
1685  return GetFunctionType(*Info);
1686 }
1687 
1688 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1689  llvm::AttrBuilder &FuncAttrs,
1690  const FunctionProtoType *FPT) {
1691  if (!FPT)
1692  return;
1693 
1694  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1695  FPT->isNothrow())
1696  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1697 }
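// For instance (illustrative, assuming default options): with the nothrow
// check above, 'void f() noexcept;' is declared in IR roughly as
//   ; Function Attrs: nounwind
//   declare void @_Z1fv() #0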
1698 
1699 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1700  bool AttrOnCallSite,
1701  llvm::AttrBuilder &FuncAttrs) {
1702  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1703  if (!HasOptnone) {
1704  if (CodeGenOpts.OptimizeSize)
1705  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1706  if (CodeGenOpts.OptimizeSize == 2)
1707  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1708  }
1709 
1710  if (CodeGenOpts.DisableRedZone)
1711  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1712  if (CodeGenOpts.NoImplicitFloat)
1713  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1714 
1715  if (AttrOnCallSite) {
1716  // Attributes that should go on the call site only.
1717  if (!CodeGenOpts.SimplifyLibCalls ||
1718  CodeGenOpts.isNoBuiltinFunc(Name.data()))
1719  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1720  if (!CodeGenOpts.TrapFuncName.empty())
1721  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1722  } else {
1723  // Attributes that should go on the function, but not the call site.
1724  if (!CodeGenOpts.DisableFPElim) {
1725  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1726  } else if (CodeGenOpts.OmitLeafFramePointer) {
1727  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1728  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1729  } else {
1730  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1731  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1732  }
1733 
1734  FuncAttrs.addAttribute("less-precise-fpmad",
1735  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1736 
1737  if (!CodeGenOpts.FPDenormalMode.empty())
1738  FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1739 
1740  FuncAttrs.addAttribute("no-trapping-math",
1741  llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1742 
1743  // Strict (compliant) code is the default, so only add this attribute to
1744  // indicate that we are trying to work around a problem case.
1745  if (!CodeGenOpts.StrictFloatCastOverflow)
1746  FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1747 
1748  // TODO: Are these all needed?
1749  // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1750  FuncAttrs.addAttribute("no-infs-fp-math",
1751  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1752  FuncAttrs.addAttribute("no-nans-fp-math",
1753  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1754  FuncAttrs.addAttribute("unsafe-fp-math",
1755  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1756  FuncAttrs.addAttribute("use-soft-float",
1757  llvm::toStringRef(CodeGenOpts.SoftFloat));
1758  FuncAttrs.addAttribute("stack-protector-buffer-size",
1759  llvm::utostr(CodeGenOpts.SSPBufferSize));
1760  FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1761  llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1762  FuncAttrs.addAttribute(
1763  "correctly-rounded-divide-sqrt-fp-math",
1764  llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1765 
1766  if (getLangOpts().OpenCL)
1767  FuncAttrs.addAttribute("denorms-are-zero",
1768  llvm::toStringRef(CodeGenOpts.FlushDenorm));
1769 
1770  // TODO: Reciprocal estimate codegen options should apply to instructions?
1771  const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1772  if (!Recips.empty())
1773  FuncAttrs.addAttribute("reciprocal-estimates",
1774  llvm::join(Recips, ","));
1775 
1776  if (!CodeGenOpts.PreferVectorWidth.empty() &&
1777  CodeGenOpts.PreferVectorWidth != "none")
1778  FuncAttrs.addAttribute("prefer-vector-width",
1779  CodeGenOpts.PreferVectorWidth);
1780 
1781  if (CodeGenOpts.StackRealignment)
1782  FuncAttrs.addAttribute("stackrealign");
1783  if (CodeGenOpts.Backchain)
1784  FuncAttrs.addAttribute("backchain");
1785  }
1786 
1787  if (getLangOpts().assumeFunctionsAreConvergent()) {
1788  // Conservatively, mark all functions and calls in CUDA and OpenCL as
1789  // convergent (meaning, they may call an intrinsically convergent op, such
1790  // as __syncthreads() / barrier(), and so can't have certain optimizations
1791  // applied around them). LLVM will remove this attribute where it safely
1792  // can.
1793  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1794  }
1795 
1796  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1797  // Exceptions aren't supported in CUDA device code.
1798  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1799 
1800  // Respect -fcuda-flush-denormals-to-zero.
1801  if (getLangOpts().CUDADeviceFlushDenormalsToZero)
1802  FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1803  }
1804 }
1805 
1806 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1807  llvm::AttrBuilder FuncAttrs;
1808  ConstructDefaultFnAttrList(F.getName(),
1809  F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1810  /* AttrOnCallsite = */ false, FuncAttrs);
1811  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1812 }
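// Editorial sketch, not part of CGCall.cpp: how the string and enum
// attributes built by ConstructDefaultFnAttrList can be attached to a
// function with the same AttrBuilder API. 'exampleAttach' is hypothetical.
static void exampleAttach(llvm::Function &F) {
  llvm::AttrBuilder B;
  B.addAttribute("no-trapping-math", "true"); // string attribute
  B.addAttribute(llvm::Attribute::NoUnwind);  // enum attribute
  F.addAttributes(llvm::AttributeList::FunctionIndex, B);
}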
1813 
1814 void CodeGenModule::ConstructAttributeList(
1815  StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1816  llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1817  llvm::AttrBuilder FuncAttrs;
1818  llvm::AttrBuilder RetAttrs;
1819 
1820  CallingConv = FI.getEffectiveCallingConvention();
1821  if (FI.isNoReturn())
1822  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1823 
1824  // If we have information about the function prototype, we can learn
1825  // attributes from there.
1826  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1827  CalleeInfo.getCalleeFunctionProtoType());
1828 
1829  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1830 
1831  bool HasOptnone = false;
1832  // FIXME: handle sseregparm someday...
1833  if (TargetDecl) {
1834  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1835  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1836  if (TargetDecl->hasAttr<NoThrowAttr>())
1837  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1838  if (TargetDecl->hasAttr<NoReturnAttr>())
1839  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1840  if (TargetDecl->hasAttr<ColdAttr>())
1841  FuncAttrs.addAttribute(llvm::Attribute::Cold);
1842  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1843  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1844  if (TargetDecl->hasAttr<ConvergentAttr>())
1845  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1846 
1847  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1848  AddAttributesFromFunctionProtoType(
1849  getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1850  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1851  // These attributes are not inherited by overrides.
1852  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1853  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1854  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1855  }
1856 
1857  // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1858  if (TargetDecl->hasAttr<ConstAttr>()) {
1859  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1860  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1861  } else if (TargetDecl->hasAttr<PureAttr>()) {
1862  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1863  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1864  } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1865  FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1866  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1867  }
1868  if (TargetDecl->hasAttr<RestrictAttr>())
1869  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1870  if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1871  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1872  if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1873  FuncAttrs.addAttribute("no_caller_saved_registers");
1874  if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1875  FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1876 
1877  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1878  if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1879  Optional<unsigned> NumElemsParam;
1880  if (AllocSize->getNumElemsParam().isValid())
1881  NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1882  FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1883  NumElemsParam);
1884  }
1885  }
1886 
1887  ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1888 
1889  if (CodeGenOpts.EnableSegmentedStacks &&
1890  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1891  FuncAttrs.addAttribute("split-stack");
1892 
1893  // Add NonLazyBind attribute to function declarations when -fno-plt
1894  // is used.
1895  if (TargetDecl && CodeGenOpts.NoPLT) {
1896  if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1897  if (!Fn->isDefined() && !AttrOnCallSite) {
1898  FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1899  }
1900  }
1901  }
1902 
1903  if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1904  if (getLangOpts().OpenCLVersion <= 120) {
1905  // In OpenCL v1.2, work groups are always uniform.
1906  FuncAttrs.addAttribute("uniform-work-group-size", "true");
1907  } else {
1908  // In OpenCL v2.0, work groups may or may not be uniform. The
1909  // '-cl-uniform-work-group-size' compile option hints to the
1910  // compiler that the global work-size is a multiple of the
1911  // work-group size specified to clEnqueueNDRangeKernel
1912  // (i.e. work groups are uniform).
1913  FuncAttrs.addAttribute("uniform-work-group-size",
1914  llvm::toStringRef(CodeGenOpts.UniformWGSize));
1915  }
1916  }
1917 
1918  if (!AttrOnCallSite) {
1919  bool DisableTailCalls = false;
1920 
1921  if (CodeGenOpts.DisableTailCalls)
1922  DisableTailCalls = true;
1923  else if (TargetDecl) {
1924  if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1925  TargetDecl->hasAttr<AnyX86InterruptAttr>())
1926  DisableTailCalls = true;
1927  else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1928  if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1929  if (!BD->doesNotEscape())
1930  DisableTailCalls = true;
1931  }
1932  }
1933 
1934  FuncAttrs.addAttribute("disable-tail-calls",
1935  llvm::toStringRef(DisableTailCalls));
1936  GetCPUAndFeaturesAttributes(TargetDecl, FuncAttrs);
1937  }
1938 
1939  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1940 
1941  QualType RetTy = FI.getReturnType();
1942  const ABIArgInfo &RetAI = FI.getReturnInfo();
1943  switch (RetAI.getKind()) {
1944  case ABIArgInfo::Extend:
1945  if (RetAI.isSignExt())
1946  RetAttrs.addAttribute(llvm::Attribute::SExt);
1947  else
1948  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1949  LLVM_FALLTHROUGH;
1950  case ABIArgInfo::Direct:
1951  if (RetAI.getInReg())
1952  RetAttrs.addAttribute(llvm::Attribute::InReg);
1953  break;
1954  case ABIArgInfo::Ignore:
1955  break;
1956 
1957  case ABIArgInfo::InAlloca:
1958  case ABIArgInfo::Indirect: {
1959  // inalloca and sret disable readnone and readonly
1960  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1961  .removeAttribute(llvm::Attribute::ReadNone);
1962  break;
1963  }
1964 
1965  case ABIArgInfo::CoerceAndExpand:
1966  break;
1967 
1968  case ABIArgInfo::Expand:
1969  llvm_unreachable("Invalid ABI kind for return argument");
1970  }
1971 
1972  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1973  QualType PTy = RefTy->getPointeeType();
1974  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1975  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1976  .getQuantity());
1977  else if (getContext().getTargetAddressSpace(PTy) == 0)
1978  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1979  }
1980 
1981  bool hasUsedSRet = false;
1982  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
1983 
1984  // Attach attributes to sret.
1985  if (IRFunctionArgs.hasSRetArg()) {
1986  llvm::AttrBuilder SRETAttrs;
1987  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1988  hasUsedSRet = true;
1989  if (RetAI.getInReg())
1990  SRETAttrs.addAttribute(llvm::Attribute::InReg);
1991  ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
1992  llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
1993  }
1994 
1995  // Attach attributes to inalloca argument.
1996  if (IRFunctionArgs.hasInallocaArg()) {
1997  llvm::AttrBuilder Attrs;
1998  Attrs.addAttribute(llvm::Attribute::InAlloca);
1999  ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2000  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2001  }
2002 
2003  unsigned ArgNo = 0;
2004  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2005  E = FI.arg_end();
2006  I != E; ++I, ++ArgNo) {
2007  QualType ParamType = I->type;
2008  const ABIArgInfo &AI = I->info;
2009  llvm::AttrBuilder Attrs;
2010 
2011  // Add attribute for padding argument, if necessary.
2012  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2013  if (AI.getPaddingInReg()) {
2014  ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2015  llvm::AttributeSet::get(
2016  getLLVMContext(),
2017  llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2018  }
2019  }
2020 
2021  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2022  // have the corresponding parameter variable. It doesn't make
2023  // sense to do it here because parameters are so messed up.
2024  switch (AI.getKind()) {
2025  case ABIArgInfo::Extend:
2026  if (AI.isSignExt())
2027  Attrs.addAttribute(llvm::Attribute::SExt);
2028  else
2029  Attrs.addAttribute(llvm::Attribute::ZExt);
2030  LLVM_FALLTHROUGH;
2031  case ABIArgInfo::Direct:
2032  if (ArgNo == 0 && FI.isChainCall())
2033  Attrs.addAttribute(llvm::Attribute::Nest);
2034  else if (AI.getInReg())
2035  Attrs.addAttribute(llvm::Attribute::InReg);
2036  break;
2037 
2038  case ABIArgInfo::Indirect: {
2039  if (AI.getInReg())
2040  Attrs.addAttribute(llvm::Attribute::InReg);
2041 
2042  if (AI.getIndirectByVal())
2043  Attrs.addAttribute(llvm::Attribute::ByVal);
2044 
2045  CharUnits Align = AI.getIndirectAlign();
2046 
2047  // In a byval argument, it is important that the required
2048  // alignment of the type is honored, as LLVM might be creating a
2049  // *new* stack object, and needs to know what alignment to give
2050  // it. (Sometimes it can deduce a sensible alignment on its own,
2051  // but not if clang decides it must emit a packed struct, or the
2052  // user specifies increased alignment requirements.)
2053  //
2054  // This is different from indirect *not* byval, where the object
2055  // exists already, and the align attribute is purely
2056  // informative.
2057  assert(!Align.isZero());
2058 
2059  // For now, only add this when we have a byval argument.
2060  // TODO: be less lazy about updating test cases.
2061  if (AI.getIndirectByVal())
2062  Attrs.addAlignmentAttr(Align.getQuantity());
2063 
2064  // byval disables readnone and readonly.
2065  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2066  .removeAttribute(llvm::Attribute::ReadNone);
2067  break;
2068  }
2069  case ABIArgInfo::Ignore:
2070  case ABIArgInfo::Expand:
2071  case ABIArgInfo::CoerceAndExpand:
2072  break;
2073 
2074  case ABIArgInfo::InAlloca:
2075  // inalloca disables readnone and readonly.
2076  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2077  .removeAttribute(llvm::Attribute::ReadNone);
2078  continue;
2079  }
2080 
2081  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2082  QualType PTy = RefTy->getPointeeType();
2083  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2084  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2085  .getQuantity());
2086  else if (getContext().getTargetAddressSpace(PTy) == 0)
2087  Attrs.addAttribute(llvm::Attribute::NonNull);
2088  }
2089 
2090  switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2091  case ParameterABI::Ordinary:
2092  break;
2093 
2094  case ParameterABI::SwiftIndirectResult: {
2095  // Add 'sret' if we haven't already used it for something, but
2096  // only if the result is void.
2097  if (!hasUsedSRet && RetTy->isVoidType()) {
2098  Attrs.addAttribute(llvm::Attribute::StructRet);
2099  hasUsedSRet = true;
2100  }
2101 
2102  // Add 'noalias' in either case.
2103  Attrs.addAttribute(llvm::Attribute::NoAlias);
2104 
2105  // Add 'dereferenceable' and 'alignment'.
2106  auto PTy = ParamType->getPointeeType();
2107  if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2108  auto info = getContext().getTypeInfoInChars(PTy);
2109  Attrs.addDereferenceableAttr(info.first.getQuantity());
2110  Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2111  info.second.getQuantity()));
2112  }
2113  break;
2114  }
2115 
2116  case ParameterABI::SwiftErrorResult:
2117  Attrs.addAttribute(llvm::Attribute::SwiftError);
2118  break;
2119 
2120  case ParameterABI::SwiftContext:
2121  Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2122  break;
2123  }
2124 
2125  if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2126  Attrs.addAttribute(llvm::Attribute::NoCapture);
2127 
2128  if (Attrs.hasAttributes()) {
2129  unsigned FirstIRArg, NumIRArgs;
2130  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2131  for (unsigned i = 0; i < NumIRArgs; i++)
2132  ArgAttrs[FirstIRArg + i] =
2133  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2134  }
2135  }
2136  assert(ArgNo == FI.arg_size());
2137 
2138  AttrList = llvm::AttributeList::get(
2139  getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2140  llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2141 }
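// Putting the pieces together (illustrative; exact attributes vary by target
// and options): a C++ function 'int f(const int &r)' typically lowers to
//   define i32 @_Z1fRKi(i32* dereferenceable(4) %r) #0
// where dereferenceable(4) comes from the ReferenceType handling above.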
2142 
2143 /// An argument came in as a promoted argument; demote it back to its
2144 /// declared type.
2145 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2146  const VarDecl *var,
2147  llvm::Value *value) {
2148  llvm::Type *varType = CGF.ConvertType(var->getType());
2149 
2150  // This can happen with promotions that actually don't change the
2151  // underlying type, like the enum promotions.
2152  if (value->getType() == varType) return value;
2153 
2154  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2155  && "unexpected promotion type");
2156 
2157  if (isa<llvm::IntegerType>(varType))
2158  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2159 
2160  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2161 }
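// Context (sketch): for a K&R definition 'void f(c) char c; {}', the caller
// passes 'c' promoted to int, and the demotion above truncates it back:
//   %arg.unpromote = trunc i32 %c to i8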
2162 
2163 /// Returns the attribute (either parameter attribute, or function
2164 /// attribute), which declares argument ArgNo to be non-null.
2165 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2166  QualType ArgType, unsigned ArgNo) {
2167  // FIXME: __attribute__((nonnull)) can also be applied to:
2168  // - references to pointers, where the pointee is known to be
2169  // nonnull (apparently a Clang extension)
2170  // - transparent unions containing pointers
2171  // In the former case, LLVM IR cannot represent the constraint. In
2172  // the latter case, we have no guarantee that the transparent union
2173  // is in fact passed as a pointer.
2174  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2175  return nullptr;
2176  // First, check attribute on parameter itself.
2177  if (PVD) {
2178  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2179  return ParmNNAttr;
2180  }
2181  // Check function attributes.
2182  if (!FD)
2183  return nullptr;
2184  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2185  if (NNAttr->isNonNull(ArgNo))
2186  return NNAttr;
2187  }
2188  return nullptr;
2189 }
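// The two source forms this helper recognizes (illustrative):
//   void f(int *p) __attribute__((nonnull(1))); // function attribute
//   void g(int *p __attribute__((nonnull)));    // parameter attribute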
2190 
2191 namespace {
2192  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2193  Address Temp;
2194  Address Arg;
2195  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2196  void Emit(CodeGenFunction &CGF, Flags flags) override {
2197  llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2198  CGF.Builder.CreateStore(errorValue, Arg);
2199  }
2200  };
2201 }
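// Shape of the cleanup emitted at function exit (sketch with illustrative
// types; %swifterror.temp is the temporary created in EmitFunctionProlog):
//   %err = load i8*, i8** %swifterror.temp
//   store i8* %err, i8** %arg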
2202 
2203 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2204  llvm::Function *Fn,
2205  const FunctionArgList &Args) {
2206  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2207  // Naked functions don't have prologues.
2208  return;
2209 
2210  // If this is an implicit-return-zero function, go ahead and
2211  // initialize the return value. TODO: it might be nice to have
2212  // a more general mechanism for this that didn't require synthesized
2213  // return statements.
2214  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2215  if (FD->hasImplicitReturnZero()) {
2216  QualType RetTy = FD->getReturnType().getUnqualifiedType();
2217  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2218  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2219  Builder.CreateStore(Zero, ReturnValue);
2220  }
2221  }
2222 
2223  // FIXME: We no longer need the types from FunctionArgList; lift up and
2224  // simplify.
2225 
2226  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2227  // Flattened function arguments.
2228  SmallVector<llvm::Value *, 16> FnArgs;
2229  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2230  for (auto &Arg : Fn->args()) {
2231  FnArgs.push_back(&Arg);
2232  }
2233  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2234 
2235  // If we're using inalloca, all the memory arguments are GEPs off of the last
2236  // parameter, which is a pointer to the complete memory area.
2237  Address ArgStruct = Address::invalid();
2238  const llvm::StructLayout *ArgStructLayout = nullptr;
2239  if (IRFunctionArgs.hasInallocaArg()) {
2240  ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2241  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2242  FI.getArgStructAlignment());
2243 
2244  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2245  }
2246 
2247  // Name the struct return parameter.
2248  if (IRFunctionArgs.hasSRetArg()) {
2249  auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2250  AI->setName("agg.result");
2251  AI->addAttr(llvm::Attribute::NoAlias);
2252  }
2253 
2254  // Track if we received the parameter as a pointer (indirect, byval, or
2255  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2256  // into a local alloca for us.
2257  SmallVector<ParamValue, 16> ArgVals;
2258  ArgVals.reserve(Args.size());
2259 
2260  // Create a pointer value for every parameter declaration. This usually
2261  // entails copying one or more LLVM IR arguments into an alloca. Don't push
2262  // any cleanups or do anything that might unwind. We do that separately, so
2263  // we can push the cleanups in the correct order for the ABI.
2264  assert(FI.arg_size() == Args.size() &&
2265  "Mismatch between function signature & arguments.");
2266  unsigned ArgNo = 0;
2267  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2268  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2269  i != e; ++i, ++info_it, ++ArgNo) {
2270  const VarDecl *Arg = *i;
2271  const ABIArgInfo &ArgI = info_it->info;
2272 
2273  bool isPromoted =
2274  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2275  // We are converting from ABIArgInfo type to VarDecl type directly, unless
2276  // the parameter is promoted. In that case we convert to the
2277  // CGFunctionInfo::ArgInfo type and demote the argument afterwards.
2278  QualType Ty = isPromoted ? info_it->type : Arg->getType();
2279  assert(hasScalarEvaluationKind(Ty) ==
2280  hasScalarEvaluationKind(Arg->getType()));
2281 
2282  unsigned FirstIRArg, NumIRArgs;
2283  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2284 
2285  switch (ArgI.getKind()) {
2286  case ABIArgInfo::InAlloca: {
2287  assert(NumIRArgs == 0);
2288  auto FieldIndex = ArgI.getInAllocaFieldIndex();
2289  CharUnits FieldOffset =
2290  CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2291  Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2292  Arg->getName());
2293  ArgVals.push_back(ParamValue::forIndirect(V));
2294  break;
2295  }
2296 
2297  case ABIArgInfo::Indirect: {
2298  assert(NumIRArgs == 1);
2299  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2300 
2301  if (!hasScalarEvaluationKind(Ty)) {
2302  // Aggregates and complex variables are accessed by reference. All we
2303  // need to do is realign the value, if requested.
2304  Address V = ParamAddr;
2305  if (ArgI.getIndirectRealign()) {
2306  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2307 
2308  // Copy from the incoming argument pointer to the temporary with the
2309  // appropriate alignment.
2310  //
2311  // FIXME: We should have a common utility for generating an aggregate
2312  // copy.
2313  CharUnits Size = getContext().getTypeSizeInChars(Ty);
2314  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2315  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2316  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2317  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2318  V = AlignedTemp;
2319  }
2320  ArgVals.push_back(ParamValue::forIndirect(V));
2321  } else {
2322  // Load scalar value from indirect argument.
2323  llvm::Value *V =
2324  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2325 
2326  if (isPromoted)
2327  V = emitArgumentDemotion(*this, Arg, V);
2328  ArgVals.push_back(ParamValue::forDirect(V));
2329  }
2330  break;
2331  }
2332 
2333  case ABIArgInfo::Extend:
2334  case ABIArgInfo::Direct: {
2335 
2336  // If we have the trivial case, handle it with no muss and fuss.
2337  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2338  ArgI.getCoerceToType() == ConvertType(Ty) &&
2339  ArgI.getDirectOffset() == 0) {
2340  assert(NumIRArgs == 1);
2341  llvm::Value *V = FnArgs[FirstIRArg];
2342  auto AI = cast<llvm::Argument>(V);
2343 
2344  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2345  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2346  PVD->getFunctionScopeIndex()))
2347  AI->addAttr(llvm::Attribute::NonNull);
2348 
2349  QualType OTy = PVD->getOriginalType();
2350  if (const auto *ArrTy =
2351  getContext().getAsConstantArrayType(OTy)) {
2352  // A C99 array parameter declaration with the static keyword also
2353  // indicates dereferenceability, and if the size is constant we can
2354  // use the dereferenceable attribute (which requires the size in
2355  // bytes).
2356  if (ArrTy->getSizeModifier() == ArrayType::Static) {
2357  QualType ETy = ArrTy->getElementType();
2358  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2359  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2360  ArrSize) {
2361  llvm::AttrBuilder Attrs;
2362  Attrs.addDereferenceableAttr(
2363  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2364  AI->addAttrs(Attrs);
2365  } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2366  AI->addAttr(llvm::Attribute::NonNull);
2367  }
2368  }
2369  } else if (const auto *ArrTy =
2370  getContext().getAsVariableArrayType(OTy)) {
2371  // For C99 VLAs with the static keyword, we don't know the size so
2372  // we can't use the dereferenceable attribute, but in addrspace(0)
2373  // we know that it must be nonnull.
2374  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2375  !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2376  AI->addAttr(llvm::Attribute::NonNull);
2377  }
2378 
2379  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2380  if (!AVAttr)
2381  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2382  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2383  if (AVAttr) {
2384  llvm::Value *AlignmentValue =
2385  EmitScalarExpr(AVAttr->getAlignment());
2386  llvm::ConstantInt *AlignmentCI =
2387  cast<llvm::ConstantInt>(AlignmentValue);
2388  unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2389  +llvm::Value::MaximumAlignment);
2390  AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2391  }
2392  }
2393 
2394  if (Arg->getType().isRestrictQualified())
2395  AI->addAttr(llvm::Attribute::NoAlias);
2396 
2397  // LLVM expects swifterror parameters to be used in very restricted
2398  // ways. Copy the value into a less-restricted temporary.
2399  if (FI.getExtParameterInfo(ArgNo).getABI()
2400  == ParameterABI::SwiftErrorResult) {
2401  QualType pointeeTy = Ty->getPointeeType();
2402  assert(pointeeTy->isPointerType());
2403  Address temp =
2404  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2405  Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2406  llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2407  Builder.CreateStore(incomingErrorValue, temp);
2408  V = temp.getPointer();
2409 
2410  // Push a cleanup to copy the value back at the end of the function.
2411  // The convention does not guarantee that the value will be written
2412  // back if the function exits with an unwind exception.
2413  EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2414  }
2415 
2416  // Ensure the argument is the correct type.
2417  if (V->getType() != ArgI.getCoerceToType())
2418  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2419 
2420  if (isPromoted)
2421  V = emitArgumentDemotion(*this, Arg, V);
2422 
2423  // Because of merging of function types from multiple decls it is
2424  // possible for the type of an argument to not match the corresponding
2425  // type in the function type. Since we are codegening the callee
2426  // in here, add a cast to the argument type.
2427  llvm::Type *LTy = ConvertType(Arg->getType());
2428  if (V->getType() != LTy)
2429  V = Builder.CreateBitCast(V, LTy);
2430 
2431  ArgVals.push_back(ParamValue::forDirect(V));
2432  break;
2433  }
2434 
2435  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2436  Arg->getName());
2437 
2438  // Pointer to store into.
2439  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2440 
2441  // Fast-isel and the optimizer generally like scalar values better than
2442  // FCAs, so we flatten them if this is safe to do for this argument.
2443  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2444  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2445  STy->getNumElements() > 1) {
2446  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2447  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2448  llvm::Type *DstTy = Ptr.getElementType();
2449  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2450 
2451  Address AddrToStoreInto = Address::invalid();
2452  if (SrcSize <= DstSize) {
2453  AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2454  } else {
2455  AddrToStoreInto =
2456  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2457  }
2458 
2459  assert(STy->getNumElements() == NumIRArgs);
2460  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2461  auto AI = FnArgs[FirstIRArg + i];
2462  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2463  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2464  Address EltPtr =
2465  Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2466  Builder.CreateStore(AI, EltPtr);
2467  }
2468 
2469  if (SrcSize > DstSize) {
2470  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2471  }
2472 
2473  } else {
2474  // Simple case, just do a coerced store of the argument into the alloca.
2475  assert(NumIRArgs == 1);
2476  auto AI = FnArgs[FirstIRArg];
2477  AI->setName(Arg->getName() + ".coerce");
2478  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2479  }
2480 
2481  // Match to what EmitParmDecl is expecting for this type.
2482  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2483  llvm::Value *V =
2484  EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2485  if (isPromoted)
2486  V = emitArgumentDemotion(*this, Arg, V);
2487  ArgVals.push_back(ParamValue::forDirect(V));
2488  } else {
2489  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2490  }
2491  break;
2492  }
2493 
2494  case ABIArgInfo::CoerceAndExpand: {
2495  // Reconstruct into a temporary.
2496  Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2497  ArgVals.push_back(ParamValue::forIndirect(alloca));
2498 
2499  auto coercionType = ArgI.getCoerceAndExpandType();
2500  alloca = Builder.CreateElementBitCast(alloca, coercionType);
2501  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2502 
2503  unsigned argIndex = FirstIRArg;
2504  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2505  llvm::Type *eltType = coercionType->getElementType(i);
2506  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2507  continue;
2508 
2509  auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2510  auto elt = FnArgs[argIndex++];
2511  Builder.CreateStore(elt, eltAddr);
2512  }
2513  assert(argIndex == FirstIRArg + NumIRArgs);
2514  break;
2515  }
2516 
2517  case ABIArgInfo::Expand: {
2518  // If this structure was expanded into multiple arguments then
2519  // we need to create a temporary and reconstruct it from the
2520  // arguments.
2521  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2522  LValue LV = MakeAddrLValue(Alloca, Ty);
2523  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2524 
2525  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2526  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2527  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2528  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2529  auto AI = FnArgs[FirstIRArg + i];
2530  AI->setName(Arg->getName() + "." + Twine(i));
2531  }
2532  break;
2533  }
2534 
2535  case ABIArgInfo::Ignore:
2536  assert(NumIRArgs == 0);
2537  // Initialize the local variable appropriately.
2538  if (!hasScalarEvaluationKind(Ty)) {
2539  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2540  } else {
2541  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2542  ArgVals.push_back(ParamValue::forDirect(U));
2543  }
2544  break;
2545  }
2546  }
2547 
2548  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2549  for (int I = Args.size() - 1; I >= 0; --I)
2550  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2551  } else {
2552  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2553  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2554  }
2555 }
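// Illustrative prolog for 'void f(struct P { int a, b; } p)' on a target
// that coerces P to i64 (sketch; exact IR varies by target):
//   %p = alloca %struct.P, align 4
//   %coerce = bitcast %struct.P* %p to i64*
//   store i64 %p.coerce, i64* %coerce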
2556 
2557 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2558  while (insn->use_empty()) {
2559  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2560  if (!bitcast) return;
2561 
2562  // This is "safe" because we would have used a ConstantExpr otherwise.
2563  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2564  bitcast->eraseFromParent();
2565  }
2566 }
2567 
2568 /// Try to emit a fused autorelease of a return result.
2569 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2570  llvm::Value *result) {
2571  // The result must be the last instruction emitted in the current block.
2572  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2573  if (BB->empty()) return nullptr;
2574  if (&BB->back() != result) return nullptr;
2575 
2576  llvm::Type *resultType = result->getType();
2577 
2578  // result is in a BasicBlock and is therefore an Instruction.
2579  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2580 
2581  SmallVector<llvm::Instruction *, 4> InstsToKill;
2582 
2583  // Look for:
2584  // %generator = bitcast %type1* %generator2 to %type2*
2585  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2586  // We would have emitted this as a constant if the operand weren't
2587  // an Instruction.
2588  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2589 
2590  // Require the generator to be immediately followed by the cast.
2591  if (generator->getNextNode() != bitcast)
2592  return nullptr;
2593 
2594  InstsToKill.push_back(bitcast);
2595  }
2596 
2597  // Look for:
2598  // %generator = call i8* @objc_retain(i8* %originalResult)
2599  // or
2600  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2601  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2602  if (!call) return nullptr;
2603 
2604  bool doRetainAutorelease;
2605 
2606  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2607  doRetainAutorelease = true;
2608  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2609  .objc_retainAutoreleasedReturnValue) {
2610  doRetainAutorelease = false;
2611 
2612  // If we emitted an assembly marker for this call (and the
2613  // ARCEntrypoints field should have been set if so), go looking
2614  // for that call. If we can't find it, we can't do this
2615  // optimization. But it should always be the immediately previous
2616  // instruction, unless we needed bitcasts around the call.
2617  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2618  llvm::Instruction *prev = call->getPrevNode();
2619  assert(prev);
2620  if (isa<llvm::BitCastInst>(prev)) {
2621  prev = prev->getPrevNode();
2622  assert(prev);
2623  }
2624  assert(isa<llvm::CallInst>(prev));
2625  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2626  CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2627  InstsToKill.push_back(prev);
2628  }
2629  } else {
2630  return nullptr;
2631  }
2632 
2633  result = call->getArgOperand(0);
2634  InstsToKill.push_back(call);
2635 
2636  // Keep killing bitcasts, for sanity. Note that we no longer care
2637  // about precise ordering as long as there's exactly one use.
2638  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2639  if (!bitcast->hasOneUse()) break;
2640  InstsToKill.push_back(bitcast);
2641  result = bitcast->getOperand(0);
2642  }
2643 
2644  // Delete all the unnecessary instructions, from latest to earliest.
2645  for (auto *I : InstsToKill)
2646  I->eraseFromParent();
2647 
2648  // Do the fused retain/autorelease if we were asked to.
2649  if (doRetainAutorelease)
2650  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2651 
2652  // Cast back to the result type.
2653  return CGF.Builder.CreateBitCast(result, resultType);
2654 }
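// The peephole illustrated (sketch): a return value computed as
//   %v = call i8* @objc_retain(i8* %x)
// that is about to be autoreleased is rewritten into the single call
//   %v = call i8* @objc_retainAutoreleaseReturnValue(i8* %x)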
2655 
2656 /// If this is a +1 of the value of an immutable 'self', remove it.
2657 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2658  llvm::Value *result) {
2659  // This is only applicable to a method with an immutable 'self'.
2660  const ObjCMethodDecl *method =
2661  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2662  if (!method) return nullptr;
2663  const VarDecl *self = method->getSelfDecl();
2664  if (!self->getType().isConstQualified()) return nullptr;
2665 
2666  // Look for a retain call.
2667  llvm::CallInst *retainCall =
2668  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2669  if (!retainCall ||
2670  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2671  return nullptr;
2672 
2673  // Look for an ordinary load of 'self'.
2674  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2675  llvm::LoadInst *load =
2676  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2677  if (!load || load->isAtomic() || load->isVolatile() ||
2678  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2679  return nullptr;
2680 
2681  // Okay! Burn it all down. This relies for correctness on the
2682  // assumption that the retain is emitted as part of the return and
2683  // that thereafter everything is used "linearly".
2684  llvm::Type *resultType = result->getType();
2685  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2686  assert(retainCall->use_empty());
2687  retainCall->eraseFromParent();
2688  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2689 
2690  return CGF.Builder.CreateBitCast(load, resultType);
2691 }
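// Pattern removed (sketch): in '- (id)foo { return self; }' with an
// immutable self, the sequence
//   %self1 = load i8*, i8** %self.addr
//   %0 = call i8* @objc_retain(i8* %self1)
// collapses back to the plain load, avoiding a retain/autorelease pair.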
2692 
2693 /// Emit an ARC autorelease of the result of a function.
2694 ///
2695 /// \return the value to actually return from the function
2696 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2697  llvm::Value *result) {
2698  // If we're returning 'self', kill the initial retain. This is a
2699  // heuristic attempt to "encourage correctness" in the really unfortunate
2700  // case where we have a return of self during a dealloc and we desperately
2701  // need to avoid the possible autorelease.
2702  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2703  return self;
2704 
2705  // At -O0, try to emit a fused retain/autorelease.
2706  if (CGF.shouldUseFusedARCCalls())
2707  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2708  return fused;
2709 
2710  return CGF.EmitARCAutoreleaseReturnValue(result);
2711 }
2712 
2713 /// Heuristically search for a dominating store to the return-value slot.
2714 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2715  // Check if a User is a store which pointerOperand is the ReturnValue.
2716  // We are looking for stores to the ReturnValue, not for stores of the
2717  // ReturnValue to some other location.
2718  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2719  auto *SI = dyn_cast<llvm::StoreInst>(U);
2720  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2721  return nullptr;
2722  // These aren't actually possible for non-coerced returns, and we
2723  // only care about non-coerced returns on this code path.
2724  assert(!SI->isAtomic() && !SI->isVolatile());
2725  return SI;
2726  };
2727  // If there are multiple uses of the return-value slot, just check
2728  // for something immediately preceding the IP. Sometimes this can
2729  // happen with how we generate implicit-returns; it can also happen
2730  // with noreturn cleanups.
2731  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2732  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2733  if (IP->empty()) return nullptr;
2734  llvm::Instruction *I = &IP->back();
2735 
2736  // Skip lifetime markers
2737  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2738  IE = IP->rend();
2739  II != IE; ++II) {
2740  if (llvm::IntrinsicInst *Intrinsic =
2741  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2742  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2743  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2744  ++II;
2745  if (II == IE)
2746  break;
2747  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2748  continue;
2749  }
2750  }
2751  I = &*II;
2752  break;
2753  }
2754 
2755  return GetStoreIfValid(I);
2756  }
2757 
2758  llvm::StoreInst *store =
2759  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2760  if (!store) return nullptr;
2761 
2762  // Now do a quick-and-dirty dominance check: just walk up the
2763  // single-predecessors chain from the current insertion point.
2764  llvm::BasicBlock *StoreBB = store->getParent();
2765  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2766  while (IP != StoreBB) {
2767  if (!(IP = IP->getSinglePredecessor()))
2768  return nullptr;
2769  }
2770 
2771  // Okay, the store's basic block dominates the insertion point; we
2772  // can do our thing.
2773  return store;
2774 }
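// What a successful search enables in EmitFunctionEpilog below (sketch):
//   store i32 %x, i32* %retval  ; the dominating store found here
//   %r = load i32, i32* %retval ; elided
//   ret i32 %r                  ; becomes 'ret i32 %x'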
2775 
2776 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2777  bool EmitRetDbgLoc,
2778  SourceLocation EndLoc) {
2779  if (FI.isNoReturn()) {
2780  // Noreturn functions don't return.
2781  EmitUnreachable(EndLoc);
2782  return;
2783  }
2784 
2785  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2786  // Naked functions don't have epilogues.
2787  Builder.CreateUnreachable();
2788  return;
2789  }
2790 
2791  // Functions with no result always return void.
2792  if (!ReturnValue.isValid()) {
2793  Builder.CreateRetVoid();
2794  return;
2795  }
2796 
2797  llvm::DebugLoc RetDbgLoc;
2798  llvm::Value *RV = nullptr;
2799  QualType RetTy = FI.getReturnType();
2800  const ABIArgInfo &RetAI = FI.getReturnInfo();
2801 
2802  switch (RetAI.getKind()) {
2803  case ABIArgInfo::InAlloca:
2804  // Aggregates get evaluated directly into the destination. Sometimes we
2805  // need to return the sret value in a register, though.
2806  assert(hasAggregateEvaluationKind(RetTy));
2807  if (RetAI.getInAllocaSRet()) {
2808  llvm::Function::arg_iterator EI = CurFn->arg_end();
2809  --EI;
2810  llvm::Value *ArgStruct = &*EI;
2811  llvm::Value *SRet = Builder.CreateStructGEP(
2812  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2813  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2814  }
2815  break;
2816 
2817  case ABIArgInfo::Indirect: {
2818  auto AI = CurFn->arg_begin();
2819  if (RetAI.isSRetAfterThis())
2820  ++AI;
2821  switch (getEvaluationKind(RetTy)) {
2822  case TEK_Complex: {
2823  ComplexPairTy RT =
2824  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2825  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2826  /*isInit*/ true);
2827  break;
2828  }
2829  case TEK_Aggregate:
2830  // Do nothing; aggregates get evaluated directly into the destination.
2831  break;
2832  case TEK_Scalar:
2833  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2834  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2835  /*isInit*/ true);
2836  break;
2837  }
2838  break;
2839  }
2840 
2841  case ABIArgInfo::Extend:
2842  case ABIArgInfo::Direct:
2843  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2844  RetAI.getDirectOffset() == 0) {
2845  // The internal return value temp always will have pointer-to-return-type
2846  // type, just do a load.
2847 
2848  // If there is a dominating store to ReturnValue, we can elide
2849  // the load, zap the store, and usually zap the alloca.
2850  if (llvm::StoreInst *SI =
2851  findDominatingStoreToReturnValue(*this)) {
2852  // Reuse the debug location from the store unless there is
2853  // cleanup code to be emitted between the store and return
2854  // instruction.
2855  if (EmitRetDbgLoc && !AutoreleaseResult)
2856  RetDbgLoc = SI->getDebugLoc();
2857  // Get the stored value and nuke the now-dead store.
2858  RV = SI->getValueOperand();
2859  SI->eraseFromParent();
2860 
2861  // If that was the only use of the return value, nuke it as well now.
2862  auto returnValueInst = ReturnValue.getPointer();
2863  if (returnValueInst->use_empty()) {
2864  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2865  alloca->eraseFromParent();
2866  ReturnValue = Address::invalid();
2867  }
2868  }
2869 
2870  // Otherwise, we have to do a simple load.
2871  } else {
2872  RV = Builder.CreateLoad(ReturnValue);
2873  }
2874  } else {
2875  // If the value is offset in memory, apply the offset now.
2876  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2877 
2878  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2879  }
2880 
2881  // In ARC, end functions that return a retainable type with a call
2882  // to objc_autoreleaseReturnValue.
2883  if (AutoreleaseResult) {
2884 #ifndef NDEBUG
2885  // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2886  // been stripped of the typedefs, so we cannot use RetTy here. Get the
2887  // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2888  // CurCodeDecl or BlockInfo.
2889  QualType RT;
2890 
2891  if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2892  RT = FD->getReturnType();
2893  else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2894  RT = MD->getReturnType();
2895  else if (isa<BlockDecl>(CurCodeDecl))
2896  RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2897  else
2898  llvm_unreachable("Unexpected function/method type");
2899 
2900  assert(getLangOpts().ObjCAutoRefCount &&
2901  !FI.isReturnsRetained() &&
2902  RT->isObjCRetainableType());
2903 #endif
2904  RV = emitAutoreleaseOfResult(*this, RV);
2905  }
2906 
2907  break;
2908 
2909  case ABIArgInfo::Ignore:
2910  break;
2911 
2912  case ABIArgInfo::CoerceAndExpand: {
2913  auto coercionType = RetAI.getCoerceAndExpandType();
2914  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2915 
2916  // Load all of the coerced elements out into results.
2917  llvm::SmallVector<llvm::Value*, 4> results;
2918  Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2919  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2920  auto coercedEltType = coercionType->getElementType(i);
2921  if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2922  continue;
2923 
2924  auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2925  auto elt = Builder.CreateLoad(eltAddr);
2926  results.push_back(elt);
2927  }
2928 
2929  // If we have one result, it's the single direct result type.
2930  if (results.size() == 1) {
2931  RV = results[0];
2932 
2933  // Otherwise, we need to make a first-class aggregate.
2934  } else {
2935  // Construct a return type that lacks padding elements.
2936  llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2937 
2938  RV = llvm::UndefValue::get(returnType);
2939  for (unsigned i = 0, e = results.size(); i != e; ++i) {
2940  RV = Builder.CreateInsertValue(RV, results[i], i);
2941  }
2942  }
2943  break;
2944  }
2945 
2946  case ABIArgInfo::Expand:
2947  llvm_unreachable("Invalid ABI kind for return argument");
2948  }
2949 
2950  llvm::Instruction *Ret;
2951  if (RV) {
2952  EmitReturnValueCheck(RV);
2953  Ret = Builder.CreateRet(RV);
2954  } else {
2955  Ret = Builder.CreateRetVoid();
2956  }
2957 
2958  if (RetDbgLoc)
2959  Ret->setDebugLoc(std::move(RetDbgLoc));
2960 }
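// Sketch of the coerce-and-expand return path above for a return type
// lowered to { i64, double } (illustrative element addresses):
//   %0 = load i64, i64* %coerce0
//   %1 = load double, double* %coerce1
//   %2 = insertvalue { i64, double } undef, i64 %0, 0
//   %3 = insertvalue { i64, double } %2, double %1, 1
//   ret { i64, double } %3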
2961 
2962 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2963  // A current decl may not be available when emitting vtable thunks.
2964  if (!CurCodeDecl)
2965  return;
2966 
2967  ReturnsNonNullAttr *RetNNAttr = nullptr;
2968  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2969  RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2970 
2971  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2972  return;
2973 
2974  // Prefer the returns_nonnull attribute if it's present.
2975  SourceLocation AttrLoc;
2976  SanitizerMask CheckKind;
2977  SanitizerHandler Handler;
2978  if (RetNNAttr) {
2979  assert(!requiresReturnValueNullabilityCheck() &&
2980  "Cannot check nullability and the nonnull attribute");
2981  AttrLoc = RetNNAttr->getLocation();
2982  CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2983  Handler = SanitizerHandler::NonnullReturn;
2984  } else {
2985  if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
2986  if (auto *TSI = DD->getTypeSourceInfo())
2987  if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
2988  AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
2989  CheckKind = SanitizerKind::NullabilityReturn;
2990  Handler = SanitizerHandler::NullabilityReturn;
2991  }
2992 
2993  SanitizerScope SanScope(this);
2994 
2995  // Make sure the "return" source location is valid. If we're checking a
2996  // nullability annotation, make sure the preconditions for the check are met.
2997  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
2998  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
2999  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3000  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3001  if (requiresReturnValueNullabilityCheck())
3002  CanNullCheck =
3003  Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3004  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3005  EmitBlock(Check);
3006 
3007  // Now do the null check.
3008  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3009  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3010  llvm::Value *DynamicData[] = {SLocPtr};
3011  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3012 
3013  EmitBlock(NoCheck);
3014 
3015 #ifndef NDEBUG
3016  // The return location should not be used after the check has been emitted.
3017  ReturnLocation = Address::invalid();
3018 #endif
3019 }
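// Shape of the emitted guard (sketch; the failure side is produced by
// EmitCheck and depends on the -fsanitize mode):
//   %nonnull = icmp ne i8* %retval, null
//   br i1 %nonnull, label %cont, label %handler.nonnull_return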
3020 
3021 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3022  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3023  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3024 }
3025 
3027  QualType Ty) {
3028  // FIXME: Generate IR in one pass, rather than going back and fixing up these
3029  // placeholders.
3030  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3031  llvm::Type *IRPtrTy = IRTy->getPointerTo();
3032  llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3033 
3034  // FIXME: When we generate this IR in one pass, we shouldn't need
3035  // this win32-specific alignment hack.
3036  CharUnits Align = CharUnits::fromQuantity(4);
3037  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3038 
3039  return AggValueSlot::forAddr(Address(Placeholder, Align),
3040  Ty.getQualifiers(),
3041  AggValueSlot::IsNotDestructed,
3042  AggValueSlot::DoesNotNeedGCBarriers,
3043  AggValueSlot::IsNotAliased,
3044  AggValueSlot::DoesNotOverlap);
3045 }
3046 
3047 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3048  const VarDecl *param,
3049  SourceLocation loc) {
3050  // StartFunction converted the ABI-lowered parameter(s) into a
3051  // local alloca. We need to turn that into an r-value suitable
3052  // for EmitCall.
3053  Address local = GetAddrOfLocalVar(param);
3054 
3055  QualType type = param->getType();
3056 
3057  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3058  "cannot emit delegate call arguments for inalloca arguments!");
3059 
3060  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3061  // but the argument needs to be the original pointer.
3062  if (type->isReferenceType()) {
3063  args.add(RValue::get(Builder.CreateLoad(local)), type);
3064 
3065  // In ARC, move out of consumed arguments so that the release cleanup
3066  // entered by StartFunction doesn't cause an over-release. This isn't
3067  // optimal -O0 code generation, but it should get cleaned up when
3068  // optimization is enabled. This also assumes that delegate calls are
3069  // performed exactly once for a set of arguments, but that should be safe.
3070  } else if (getLangOpts().ObjCAutoRefCount &&
3071  param->hasAttr<NSConsumedAttr>() &&
3072  type->isObjCRetainableType()) {
3073  llvm::Value *ptr = Builder.CreateLoad(local);
3074  auto null =
3075  llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3076  Builder.CreateStore(null, local);
3077  args.add(RValue::get(ptr), type);
3078 
3079  // For the most part, we just need to load the alloca, except that
3080  // aggregate r-values are actually pointers to temporaries.
3081  } else {
3082  args.add(convertTempToRValue(local, type, loc), type);
3083  }
3084 
3085  // Deactivate the cleanup for the callee-destructed param that was pushed.
3086  if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3087  type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3088  type.isDestructedType()) {
3089  EHScopeStack::stable_iterator cleanup =
3090  CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3091  assert(cleanup.isValid() &&
3092  "cleanup for callee-destructed param not recorded");
3093  // This unreachable is a temporary marker which will be removed later.
3094  llvm::Instruction *isActive = Builder.CreateUnreachable();
3095  args.addArgCleanupDeactivation(cleanup, isActive);
3096  }
3097 }
3098 
3099 static bool isProvablyNull(llvm::Value *addr) {
3100  return isa<llvm::ConstantPointerNull>(addr);
3101 }
3102 
3103 /// Emit the actual writing-back of a writeback.
3104 static void emitWriteback(CodeGenFunction &CGF,
3105  const CallArgList::Writeback &writeback) {
3106  const LValue &srcLV = writeback.Source;
3107  Address srcAddr = srcLV.getAddress();
3108  assert(!isProvablyNull(srcAddr.getPointer()) &&
3109  "shouldn't have writeback for provably null argument");
3110 
3111  llvm::BasicBlock *contBB = nullptr;
3112 
3113  // If the argument wasn't provably non-null, we need to null check
3114  // before doing the store.
3115  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3116  CGF.CGM.getDataLayout());
3117  if (!provablyNonNull) {
3118  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3119  contBB = CGF.createBasicBlock("icr.done");
3120 
3121  llvm::Value *isNull =
3122  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3123  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3124  CGF.EmitBlock(writebackBB);
3125  }
3126 
3127  // Load the value to writeback.
3128  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3129 
3130  // Cast it back, in case we're writing an id to a Foo* or something.
3131  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3132  "icr.writeback-cast");
3133 
3134  // Perform the writeback.
3135 
3136  // If we have a "to use" value, it's something we need to emit a use
3137  // of. This has to be carefully threaded in: if it's done after the
3138  // release it's potentially undefined behavior (and the optimizer
3139  // will ignore it), and if it happens before the retain then the
3140  // optimizer could move the release there.
3141  if (writeback.ToUse) {
3142  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3143 
3144  // Retain the new value. No need to block-copy here: the block's
3145  // being passed up the stack.
3146  value = CGF.EmitARCRetainNonBlock(value);
3147 
3148  // Emit the intrinsic use here.
3149  CGF.EmitARCIntrinsicUse(writeback.ToUse);
3150 
3151  // Load the old value (primitively).
3152  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3153 
3154  // Put the new value in place (primitively).
3155  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3156 
3157  // Release the old value.
3158  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3159 
3160  // Otherwise, we can just do a normal lvalue store.
3161  } else {
3162  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3163  }
3164 
3165  // Jump to the continuation block.
3166  if (!provablyNonNull)
3167  CGF.EmitBlock(contBB);
3168 }
3169 
3170 static void emitWritebacks(CodeGenFunction &CGF,
3171  const CallArgList &args) {
3172  for (const auto &I : args.writebacks())
3173  emitWriteback(CGF, I);
3174 }
3175 
3176 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3177  const CallArgList &CallArgs) {
3178  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3179  CallArgs.getCleanupsToDeactivate();
3180  // Iterate in reverse to increase the likelihood of popping the cleanup.
3181  for (const auto &I : llvm::reverse(Cleanups)) {
3182  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3183  I.IsActiveIP->eraseFromParent();
3184  }
3185 }
3186 
3187 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3188  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3189  if (uop->getOpcode() == UO_AddrOf)
3190  return uop->getSubExpr();
3191  return nullptr;
3192 }
3193 
3194 /// Emit an argument that's being passed call-by-writeback. That is,
3195 /// we are passing the address of an __autoreleased temporary; it
3196 /// might be copy-initialized with the current value of the given
3197 /// address, but it will definitely be copied out of after the call.
3198 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3199  const ObjCIndirectCopyRestoreExpr *CRE) {
3200  LValue srcLV;
3201 
3202  // Make an optimistic effort to emit the address as an l-value.
3203  // This can fail if the argument expression is more complicated.
3204  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3205  srcLV = CGF.EmitLValue(lvExpr);
3206 
3207  // Otherwise, just emit it as a scalar.
3208  } else {
3209  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3210 
3211  QualType srcAddrType =
3212  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3213  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3214  }
3215  Address srcAddr = srcLV.getAddress();
3216 
3217  // The dest and src types don't necessarily match in LLVM terms
3218  // because of the crazy ObjC compatibility rules.
3219 
3220  llvm::PointerType *destType =
3221  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3222 
3223  // If the address is a constant null, just pass the appropriate null.
3224  if (isProvablyNull(srcAddr.getPointer())) {
3225  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3226  CRE->getType());
3227  return;
3228  }
3229 
3230  // Create the temporary.
3231  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3232  CGF.getPointerAlign(),
3233  "icr.temp");
3234  // Loading an l-value can introduce a cleanup if the l-value is __weak,
3235  // and that cleanup will be conditional if we can't prove that the l-value
3236  // isn't null, so we need to register a dominating point so that the cleanups
3237  // system will make valid IR.
3238  CodeGenFunction::ConditionalEvaluation condEval(CGF);
3239 
3240  // Zero-initialize it if we're not doing a copy-initialization.
3241  bool shouldCopy = CRE->shouldCopy();
3242  if (!shouldCopy) {
3243  llvm::Value *null =
3244  llvm::ConstantPointerNull::get(
3245  cast<llvm::PointerType>(destType->getElementType()));
3246  CGF.Builder.CreateStore(null, temp);
3247  }
3248 
3249  llvm::BasicBlock *contBB = nullptr;
3250  llvm::BasicBlock *originBB = nullptr;
3251 
3252  // If the address is *not* known to be non-null, we need to switch.
3253  llvm::Value *finalArgument;
3254 
3255  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3256  CGF.CGM.getDataLayout());
3257  if (provablyNonNull) {
3258  finalArgument = temp.getPointer();
3259  } else {
3260  llvm::Value *isNull =
3261  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3262 
3263  finalArgument = CGF.Builder.CreateSelect(isNull,
3264  llvm::ConstantPointerNull::get(destType),
3265  temp.getPointer(), "icr.argument");
3266 
3267  // If we need to copy, then the load has to be conditional, which
3268  // means we need control flow.
3269  if (shouldCopy) {
3270  originBB = CGF.Builder.GetInsertBlock();
3271  contBB = CGF.createBasicBlock("icr.cont");
3272  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3273  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3274  CGF.EmitBlock(copyBB);
3275  condEval.begin(CGF);
3276  }
3277  }
3278 
3279  llvm::Value *valueToUse = nullptr;
3280 
3281  // Perform a copy if necessary.
3282  if (shouldCopy) {
3283  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3284  assert(srcRV.isScalar());
3285 
3286  llvm::Value *src = srcRV.getScalarVal();
3287  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3288  "icr.cast");
3289 
3290  // Use an ordinary store, not a store-to-lvalue.
3291  CGF.Builder.CreateStore(src, temp);
3292 
3293  // If optimization is enabled, and the value was held in a
3294  // __strong variable, we need to tell the optimizer that this
3295  // value has to stay alive until we're doing the store back.
3296  // This is because the temporary is effectively unretained,
3297  // and so otherwise we can violate the high-level semantics.
3298  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3299  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3300  valueToUse = src;
3301  }
3302  }
3303 
3304  // Finish the control flow if we needed it.
3305  if (shouldCopy && !provablyNonNull) {
3306  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3307  CGF.EmitBlock(contBB);
3308 
3309  // Make a phi for the value to intrinsically use.
3310  if (valueToUse) {
3311  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3312  "icr.to-use");
3313  phiToUse->addIncoming(valueToUse, copyBB);
3314  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3315  originBB);
3316  valueToUse = phiToUse;
3317  }
3318 
3319  condEval.end(CGF);
3320  }
3321 
3322  args.addWriteback(srcLV, temp, valueToUse);
3323  args.add(RValue::get(finalArgument), CRE->getType());
3324 }
3325 
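// Illustrative sketch (hypothetical caller, not part of CGCall.cpp): under ARC,
// passing the address of a __strong local where the parameter is
// NSError * __autoreleasing * is wrapped in an ObjCIndirectCopyRestoreExpr,
// and emitWritebackArg passes "icr.temp" instead, copying it back afterwards:
//
//   NSError *err = nil;                       // __strong local
//   BOOL ok = [mgr removeItemAtURL:url
//                            error:&err];     // argument goes through icr.temp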
3326 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3327  assert(!StackBase);
3328 
3329  // Save the stack.
3330  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3331  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3332 }
3333 
3334 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3335  if (StackBase) {
3336  // Restore the stack after the call.
3337  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3338  CGF.Builder.CreateCall(F, StackBase);
3339  }
3340 }
3341 
3342 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3343  SourceLocation ArgLoc,
3344  AbstractCallee AC,
3345  unsigned ParmNum) {
3346  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3347  SanOpts.has(SanitizerKind::NullabilityArg)))
3348  return;
3349 
3350  // The param decl may be missing in a variadic function.
3351  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3352  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3353 
3354  // Prefer the nonnull attribute if it's present.
3355  const NonNullAttr *NNAttr = nullptr;
3356  if (SanOpts.has(SanitizerKind::NonnullAttribute))
3357  NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3358 
3359  bool CanCheckNullability = false;
3360  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3361  auto Nullability = PVD->getType()->getNullability(getContext());
3362  CanCheckNullability = Nullability &&
3363  *Nullability == NullabilityKind::NonNull &&
3364  PVD->getTypeSourceInfo();
3365  }
3366 
3367  if (!NNAttr && !CanCheckNullability)
3368  return;
3369 
3370  SourceLocation AttrLoc;
3371  SanitizerMask CheckKind;
3372  SanitizerHandler Handler;
3373  if (NNAttr) {
3374  AttrLoc = NNAttr->getLocation();
3375  CheckKind = SanitizerKind::NonnullAttribute;
3376  Handler = SanitizerHandler::NonnullArg;
3377  } else {
3378  AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3379  CheckKind = SanitizerKind::NullabilityArg;
3380  Handler = SanitizerHandler::NullabilityArg;
3381  }
3382 
3383  SanitizerScope SanScope(this);
3384  assert(RV.isScalar());
3385  llvm::Value *V = RV.getScalarVal();
3386  llvm::Value *Cond =
3387  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3388  llvm::Constant *StaticData[] = {
3389  EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3390  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3391  };
3392  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3393 }
3394 
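// Illustrative sketch (hypothetical code, not part of CGCall.cpp): the check
// emitted above fires at the call site under -fsanitize=nonnull-attribute or
// -fsanitize=nullability-arg for calls such as:
//
//   __attribute__((nonnull(1))) void use(int *p);
//   void take(int *_Nonnull p);
//   use(nullptr);    // diagnosed via SanitizerKind::NonnullAttribute
//   take(nullptr);   // diagnosed via SanitizerKind::NullabilityArg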
3395 void CodeGenFunction::EmitCallArgs(
3396  CallArgList &Args, ArrayRef<QualType> ArgTypes,
3397  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3398  AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3399  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3400 
3401  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3402  // because arguments are destroyed left to right in the callee. As a special
3403  // case, there are certain language constructs that require left-to-right
3404  // evaluation, and in those cases we consider the evaluation order requirement
3405  // to trump the "destruction order is reverse construction order" guarantee.
3406  bool LeftToRight =
3407  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3408  ? Order == EvaluationOrder::ForceLeftToRight
3409  : Order != EvaluationOrder::ForceRightToLeft;
3410 
3411  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3412  RValue EmittedArg) {
3413  if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3414  return;
3415  auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3416  if (PS == nullptr)
3417  return;
3418 
3419  const auto &Context = getContext();
3420  auto SizeTy = Context.getSizeType();
3421  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3422  assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3423  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3424  EmittedArg.getScalarVal());
3425  Args.add(RValue::get(V), SizeTy);
3426  // If we're emitting args in reverse, be sure to do so with
3427  // pass_object_size, as well.
3428  if (!LeftToRight)
3429  std::swap(Args.back(), *(&Args.back() - 1));
3430  };
3431 
3432  // Insert a stack save if we're going to need any inalloca args.
3433  bool HasInAllocaArgs = false;
3434  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3435  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3436  I != E && !HasInAllocaArgs; ++I)
3437  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3438  if (HasInAllocaArgs) {
3439  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3440  Args.allocateArgumentMemory(*this);
3441  }
3442  }
3443 
3444  // Evaluate each argument in the appropriate order.
3445  size_t CallArgsStart = Args.size();
3446  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3447  unsigned Idx = LeftToRight ? I : E - I - 1;
3448  CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3449  unsigned InitialArgSize = Args.size();
3450  // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3451  // the argument and parameter match or the objc method is parameterized.
3452  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3453  getContext().hasSameUnqualifiedType((*Arg)->getType(),
3454  ArgTypes[Idx]) ||
3455  (isa<ObjCMethodDecl>(AC.getDecl()) &&
3456  isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3457  "Argument and parameter types don't match");
3458  EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3459  // In particular, we depend on it being the last arg in Args, and the
3460  // objectsize bits depend on there only being one arg if !LeftToRight.
3461  assert(InitialArgSize + 1 == Args.size() &&
3462  "The code below depends on only adding one arg per EmitCallArg");
3463  (void)InitialArgSize;
3464  // Since pointer argument are never emitted as LValue, it is safe to emit
3465  // non-null argument check for r-value only.
3466  if (!Args.back().hasLValue()) {
3467  RValue RVArg = Args.back().getKnownRValue();
3468  EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3469  ParamsToSkip + Idx);
3470  // @llvm.objectsize should never have side-effects and shouldn't need
3471  // destruction/cleanups, so we can safely "emit" it after its arg,
3472  // regardless of right-to-leftness.
3473  MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3474  }
3475  }
3476 
3477  if (!LeftToRight) {
3478  // Un-reverse the arguments we just evaluated so they match up with the LLVM
3479  // IR function.
3480  std::reverse(Args.begin() + CallArgsStart, Args.end());
3481  }
3482 }
3483 
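// Illustrative sketch (hypothetical code, not part of CGCall.cpp): on the MS
// C++ ABI the arguments below are evaluated right to left, so g() runs before
// f(); a pass_object_size parameter gains an implicit size argument that is
// kept adjacent to its pointer when the order is reversed:
//
//   void callee(int a, int b);
//   callee(f(), g());                                      // g() emitted first
//   void sized(char *const p __attribute__((pass_object_size(0))));
//   char buf[8]; sized(buf);                               // implicit size == 8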
3484 namespace {
3485 
3486 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3487  DestroyUnpassedArg(Address Addr, QualType Ty)
3488  : Addr(Addr), Ty(Ty) {}
3489 
3490  Address Addr;
3491  QualType Ty;
3492 
3493  void Emit(CodeGenFunction &CGF, Flags flags) override {
3494  QualType::DestructionKind DtorKind = Ty.isDestructedType();
3495  if (DtorKind == QualType::DK_cxx_destructor) {
3496  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3497  assert(!Dtor->isTrivial());
3498  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3499  /*Delegating=*/false, Addr);
3500  } else {
3501  CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3502  }
3503  }
3504 };
3505 
3506 struct DisableDebugLocationUpdates {
3507  CodeGenFunction &CGF;
3508  bool disabledDebugInfo;
3509  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3510  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3511  CGF.disableDebugInfo();
3512  }
3513  ~DisableDebugLocationUpdates() {
3514  if (disabledDebugInfo)
3515  CGF.enableDebugInfo();
3516  }
3517 };
3518 
3519 } // end anonymous namespace
3520 
3521 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3522  if (!HasLV)
3523  return RV;
3524  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3525  CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3526  LV.isVolatile());
3527  IsUsed = true;
3528  return RValue::getAggregate(Copy.getAddress());
3529 }
3530 
3531 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3532  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3533  if (!HasLV && RV.isScalar())
3534  CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3535  else if (!HasLV && RV.isComplex())
3536  CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3537  else {
3538  auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3539  LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3540  // We assume that call args are never copied into subobjects.
3541  CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3542  HasLV ? LV.isVolatileQualified()
3543  : RV.isVolatileQualified());
3544  }
3545  IsUsed = true;
3546 }
3547 
3548 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3549  QualType type) {
3550  DisableDebugLocationUpdates Dis(*this, E);
3551  if (const ObjCIndirectCopyRestoreExpr *CRE
3552  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3553  assert(getLangOpts().ObjCAutoRefCount);
3554  return emitWritebackArg(*this, args, CRE);
3555  }
3556 
3557  assert(type->isReferenceType() == E->isGLValue() &&
3558  "reference binding to unmaterialized r-value!");
3559 
3560  if (E->isGLValue()) {
3561  assert(E->getObjectKind() == OK_Ordinary);
3562  return args.add(EmitReferenceBindingToExpr(E), type);
3563  }
3564 
3565  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3566 
3567  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3568  // However, we still have to push an EH-only cleanup in case we unwind before
3569  // we make it to the call.
3570  if (HasAggregateEvalKind &&
3571  type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3572  // If we're using inalloca, use the argument memory. Otherwise, use a
3573  // temporary.
3574  AggValueSlot Slot;
3575  if (args.isUsingInAlloca())
3576  Slot = createPlaceholderSlot(*this, type);
3577  else
3578  Slot = CreateAggTemp(type, "agg.tmp");
3579 
3580  bool DestroyedInCallee = true, NeedsEHCleanup = true;
3581  if (const auto *RD = type->getAsCXXRecordDecl())
3582  DestroyedInCallee = RD->hasNonTrivialDestructor();
3583  else
3584  NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3585 
3586  if (DestroyedInCallee)
3587  Slot.setExternallyDestructed();
3588 
3589  EmitAggExpr(E, Slot);
3590  RValue RV = Slot.asRValue();
3591  args.add(RV, type);
3592 
3593  if (DestroyedInCallee && NeedsEHCleanup) {
3594  // Create a no-op GEP between the placeholder and the cleanup so we can
3595  // RAUW it successfully. It also serves as a marker of the first
3596  // instruction where the cleanup is active.
3597  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3598  type);
3599  // This unreachable is a temporary marker which will be removed later.
3600  llvm::Instruction *IsActive = Builder.CreateUnreachable();
3601  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3602  }
3603  return;
3604  }
3605 
3606  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3607  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3608  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3609  assert(L.isSimple());
3610  args.addUncopiedAggregate(L, type);
3611  return;
3612  }
3613 
3614  args.add(EmitAnyExprToTemp(E), type);
3615 }
3616 
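// Illustrative sketch (hypothetical code, not part of CGCall.cpp): the MS C++
// ABI branch above applies to arguments like this one, whose destructor runs
// in the callee on the normal path, leaving the caller only an EH cleanup:
//
//   struct S { ~S(); };
//   void take(S s);
//   take(S{});   // ~S() runs inside take(), not in the caller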
3617 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3618  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3619  // implicitly widens null pointer constants that are arguments to varargs
3620  // functions to pointer-sized ints.
3621  if (!getTarget().getTriple().isOSWindows())
3622  return Arg->getType();
3623 
3624  if (Arg->getType()->isIntegerType() &&
3625  getContext().getTypeSize(Arg->getType()) <
3626  getContext().getTargetInfo().getPointerWidth(0) &&
3627  Arg->isNullPointerConstant(getContext(),
3628  Expr::NPC_ValueDependentIsNotNull)) {
3629  return getContext().getIntPtrType();
3630  }
3631 
3632  return Arg->getType();
3633 }
3634 
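// Illustrative sketch (hypothetical code, not part of CGCall.cpp): on Win64,
// NULL is a 32-bit 0, so without the widening above a varargs call would pass
// only half a pointer:
//
//   void vf(const char *fmt, ...);
//   vf("%p", NULL);   // NULL promoted to a pointer-sized integer, as MSVC does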
3635 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3636 // optimizer it can aggressively ignore unwind edges.
3637 void
3638 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3639  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3640  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3641  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3642  CGM.getNoObjCARCExceptionsMetadata());
3643 }
3644 
3645 /// Emits a call to the given no-arguments nounwind runtime function.
3646 llvm::CallInst *
3647 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3648  const llvm::Twine &name) {
3649  return EmitNounwindRuntimeCall(callee, None, name);
3650 }
3651 
3652 /// Emits a call to the given nounwind runtime function.
3653 llvm::CallInst *
3654 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3655  ArrayRef<llvm::Value *> args,
3656  const llvm::Twine &name) {
3657  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3658  call->setDoesNotThrow();
3659  return call;
3660 }
3661 
3662 /// Emits a simple call (never an invoke) to the given no-arguments
3663 /// runtime function.
3664 llvm::CallInst *
3665 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3666  const llvm::Twine &name) {
3667  return EmitRuntimeCall(callee, None, name);
3668 }
3669 
3670 // Calls which may throw must have operand bundles indicating which funclet
3671 // they are nested within.
3672 SmallVector<llvm::OperandBundleDef, 1>
3673 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3674  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3675  // There is no need for a funclet operand bundle if we aren't inside a
3676  // funclet.
3677  if (!CurrentFuncletPad)
3678  return BundleList;
3679 
3680  // Skip intrinsics which cannot throw.
3681  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3682  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3683  return BundleList;
3684 
3685  BundleList.emplace_back("funclet", CurrentFuncletPad);
3686  return BundleList;
3687 }
3688 
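// Illustrative sketch (assumed IR shape, not from this file): inside a WinEH
// cleanup funclet, calls carry an operand bundle naming their pad:
//
//   %pad = cleanuppad within none []
//   call void @dtor(i8* %p) [ "funclet"(token %pad) ]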
3689 /// Emits a simple call (never an invoke) to the given runtime function.
3690 llvm::CallInst *
3691 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3692  ArrayRef<llvm::Value *> args,
3693  const llvm::Twine &name) {
3694  llvm::CallInst *call =
3695  Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
3696  call->setCallingConv(getRuntimeCC());
3697  return call;
3698 }
3699 
3700 /// Emits a call or invoke to the given noreturn runtime function.
3701 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3702  ArrayRef<llvm::Value*> args) {
3703  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3704  getBundlesForFunclet(callee);
3705 
3706  if (getInvokeDest()) {
3707  llvm::InvokeInst *invoke =
3708  Builder.CreateInvoke(callee,
3709  getUnreachableBlock(),
3710  getInvokeDest(),
3711  args,
3712  BundleList);
3713  invoke->setDoesNotReturn();
3714  invoke->setCallingConv(getRuntimeCC());
3715  } else {
3716  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3717  call->setDoesNotReturn();
3718  call->setCallingConv(getRuntimeCC());
3719  Builder.CreateUnreachable();
3720  }
3721 }
3722 
3723 /// Emits a call or invoke instruction to the given nullary runtime function.
3724 llvm::CallSite
3725 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3726  const Twine &name) {
3727  return EmitRuntimeCallOrInvoke(callee, None, name);
3728 }
3729 
3730 /// Emits a call or invoke instruction to the given runtime function.
3731 llvm::CallSite
3732 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3733  ArrayRef<llvm::Value *> args,
3734  const Twine &name) {
3735  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3736  callSite.setCallingConv(getRuntimeCC());
3737  return callSite;
3738 }
3739 
3740 /// Emits a call or invoke instruction to the given function, depending
3741 /// on the current state of the EH stack.
3742 llvm::CallSite
3743 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3744  ArrayRef<llvm::Value *> Args,
3745  const Twine &Name) {
3746  llvm::BasicBlock *InvokeDest = getInvokeDest();
3747  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3748  getBundlesForFunclet(Callee);
3749 
3750  llvm::Instruction *Inst;
3751  if (!InvokeDest)
3752  Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3753  else {
3754  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3755  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3756  Name);
3757  EmitBlock(ContBB);
3758  }
3759 
3760  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3761  // optimizer it can aggressively ignore unwind edges.
3762  if (CGM.getLangOpts().ObjCAutoRefCount)
3763  AddObjCARCExceptionMetadata(Inst);
3764 
3765  return llvm::CallSite(Inst);
3766 }
3767 
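// Illustrative sketch (assumed IR shape, not from this file): with a live EH
// scope the helper above emits an invoke that resumes at "invoke.cont";
// without one it degrades to a plain call:
//
//   invoke void @f() to label %invoke.cont unwind label %lpad
//   invoke.cont:
//     ...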
3768 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3769  llvm::Value *New) {
3770  DeferredReplacements.push_back(std::make_pair(Old, New));
3771 }
3772 
3773 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3774  const CGCallee &Callee,
3775  ReturnValueSlot ReturnValue,
3776  const CallArgList &CallArgs,
3777  llvm::Instruction **callOrInvoke,
3778  SourceLocation Loc) {
3779  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3780 
3781  assert(Callee.isOrdinary() || Callee.isVirtual());
3782 
3783  // Handle struct-return functions by passing a pointer to the
3784  // location that we would like to return into.
3785  QualType RetTy = CallInfo.getReturnType();
3786  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3787 
3788  llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3789 
3790  // 1. Set up the arguments.
3791 
3792  // If we're using inalloca, insert the allocation after the stack save.
3793  // FIXME: Do this earlier rather than hacking it in here!
3794  Address ArgMemory = Address::invalid();
3795  const llvm::StructLayout *ArgMemoryLayout = nullptr;
3796  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3797  const llvm::DataLayout &DL = CGM.getDataLayout();
3798  ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3799  llvm::Instruction *IP = CallArgs.getStackBase();
3800  llvm::AllocaInst *AI;
3801  if (IP) {
3802  IP = IP->getNextNode();
3803  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3804  "argmem", IP);
3805  } else {
3806  AI = CreateTempAlloca(ArgStruct, "argmem");
3807  }
3808  auto Align = CallInfo.getArgStructAlignment();
3809  AI->setAlignment(Align.getQuantity());
3810  AI->setUsedWithInAlloca(true);
3811  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3812  ArgMemory = Address(AI, Align);
3813  }
3814 
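// Illustrative sketch (assumed IR shape, not from this file): for a 32-bit MS
// call taking a non-trivially-copyable argument by value, the stack save and
// the alloca created above bracket the call roughly as:
//
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ %struct.S }>
//   call void @takes_s(<{ %struct.S }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)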
3815  // Helper function to drill into the inalloca allocation.
3816  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3817  auto FieldOffset =
3818  CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3819  return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3820  };
3821 
3822  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3823  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3824 
3825  // If the call returns a temporary with struct return, create a temporary
3826  // alloca to hold the result, unless one is given to us.
3827  Address SRetPtr = Address::invalid();
3828  Address SRetAlloca = Address::invalid();
3829  llvm::Value *UnusedReturnSizePtr = nullptr;
3830  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3831  if (!ReturnValue.isNull()) {
3832  SRetPtr = ReturnValue.getValue();
3833  } else {
3834  SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3835  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3836  uint64_t size =
3837  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3838  UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3839  }
3840  }
3841  if (IRFunctionArgs.hasSRetArg()) {
3842  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3843  } else if (RetAI.isInAlloca()) {
3844  Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3845  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3846  }
3847  }
3848 
3849  Address swiftErrorTemp = Address::invalid();
3850  Address swiftErrorArg = Address::invalid();
3851 
3852  // Translate all of the arguments as necessary to match the IR lowering.
3853  assert(CallInfo.arg_size() == CallArgs.size() &&
3854  "Mismatch between function signature & arguments.");
3855  unsigned ArgNo = 0;
3856  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3857  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3858  I != E; ++I, ++info_it, ++ArgNo) {
3859  const ABIArgInfo &ArgInfo = info_it->info;
3860 
3861  // Insert a padding argument to ensure proper alignment.
3862  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3863  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3864  llvm::UndefValue::get(ArgInfo.getPaddingType());
3865 
3866  unsigned FirstIRArg, NumIRArgs;
3867  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3868 
3869  switch (ArgInfo.getKind()) {
3870  case ABIArgInfo::InAlloca: {
3871  assert(NumIRArgs == 0);
3872  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3873  if (I->isAggregate()) {
3874  // Replace the placeholder with the appropriate argument slot GEP.
3875  Address Addr = I->hasLValue()
3876  ? I->getKnownLValue().getAddress()
3877  : I->getKnownRValue().getAggregateAddress();
3878  llvm::Instruction *Placeholder =
3879  cast<llvm::Instruction>(Addr.getPointer());
3880  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3881  Builder.SetInsertPoint(Placeholder);
3882  Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3883  Builder.restoreIP(IP);
3884  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3885  } else {
3886  // Store the RValue into the argument struct.
3887  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3888  unsigned AS = Addr.getType()->getPointerAddressSpace();
3889  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3890  // There are some cases where a trivial bitcast is not avoidable. The
3891  // definition of a type later in a translation unit may change its type
3892  // from {}* to (%struct.foo*)*.
3893  if (Addr.getType() != MemType)
3894  Addr = Builder.CreateBitCast(Addr, MemType);
3895  I->copyInto(*this, Addr);
3896  }
3897  break;
3898  }
3899 
3900  case ABIArgInfo::Indirect: {
3901  assert(NumIRArgs == 1);
3902  if (!I->isAggregate()) {
3903  // Make a temporary alloca to pass the argument.
3904  Address Addr = CreateMemTempWithoutCast(
3905  I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3906  IRCallArgs[FirstIRArg] = Addr.getPointer();
3907 
3908  I->copyInto(*this, Addr);
3909  } else {
3910  // We want to avoid creating an unnecessary temporary+copy here;
3911  // however, we need one in three cases:
3912  // 1. If the argument is not byval, and we are required to copy the
3913  // source. (This case doesn't occur on any common architecture.)
3914  // 2. If the argument is byval, RV is not sufficiently aligned, and
3915  // we cannot force it to be sufficiently aligned.
3916  // 3. If the argument is byval, but RV is not located in default
3917  // or alloca address space.
3918  Address Addr = I->hasLValue()
3919  ? I->getKnownLValue().getAddress()
3920  : I->getKnownRValue().getAggregateAddress();
3921  llvm::Value *V = Addr.getPointer();
3922  CharUnits Align = ArgInfo.getIndirectAlign();
3923  const llvm::DataLayout *TD = &CGM.getDataLayout();
3924 
3925  assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3926  IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3927  TD->getAllocaAddrSpace()) &&
3928  "indirect argument must be in alloca address space");
3929 
3930  bool NeedCopy = false;
3931 
3932  if (Addr.getAlignment() < Align &&
3933  llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3934  Align.getQuantity()) {
3935  NeedCopy = true;
3936  } else if (I->hasLValue()) {
3937  auto LV = I->getKnownLValue();
3938  auto AS = LV.getAddressSpace();
3939  if ((!ArgInfo.getIndirectByVal() &&
3940  (LV.getAlignment() >=
3941  getContext().getTypeAlignInChars(I->Ty))) ||
3942  (ArgInfo.getIndirectByVal() &&
3943  ((AS != LangAS::Default && AS != LangAS::opencl_private &&
3944  AS != CGM.getASTAllocaAddressSpace())))) {
3945  NeedCopy = true;
3946  }
3947  }
3948  if (NeedCopy) {
3949  // Create an aligned temporary, and copy to it.
3950  Address AI = CreateMemTempWithoutCast(
3951  I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
3952  IRCallArgs[FirstIRArg] = AI.getPointer();
3953  I->copyInto(*this, AI);
3954  } else {
3955  // Skip the extra memcpy call.
3956  auto *T = V->getType()->getPointerElementType()->getPointerTo(
3957  CGM.getDataLayout().getAllocaAddrSpace());
3958  IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
3959  *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
3960  true);
3961  }
3962  }
3963  break;
3964  }
3965 
3966  case ABIArgInfo::Ignore:
3967  assert(NumIRArgs == 0);
3968  break;
3969 
3970  case ABIArgInfo::Extend:
3971  case ABIArgInfo::Direct: {
3972  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3973  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3974  ArgInfo.getDirectOffset() == 0) {
3975  assert(NumIRArgs == 1);
3976  llvm::Value *V;
3977  if (!I->isAggregate())
3978  V = I->getKnownRValue().getScalarVal();
3979  else
3980  V = Builder.CreateLoad(
3981  I->hasLValue() ? I->getKnownLValue().getAddress()
3982  : I->getKnownRValue().getAggregateAddress());
3983 
3984  // Implement swifterror by copying into a new swifterror argument.
3985  // We'll write back in the normal path out of the call.
3986  if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3987  == ParameterABI::SwiftErrorResult) {
3988  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
3989 
3990  QualType pointeeTy = I->Ty->getPointeeType();
3991  swiftErrorArg =
3992  Address(V, getContext().getTypeAlignInChars(pointeeTy));
3993 
3994  swiftErrorTemp =
3995  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3996  V = swiftErrorTemp.getPointer();
3997  cast<llvm::AllocaInst>(V)->setSwiftError(true);
3998 
3999  llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4000  Builder.CreateStore(errorValue, swiftErrorTemp);
4001  }
4002 
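// Illustrative sketch (assumed IR shape, not from this file): the swifterror
// copy-in above, paired with the copy-out after the call, looks roughly like:
//
//   %swifterror.temp = alloca swifterror %swift.error*
//   store %swift.error* %err, %swift.error** %swifterror.temp
//   call void @f(%swift.error** swifterror %swifterror.temp)
//   ; the loaded result is stored back to the original slot after the call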
4003  // We might have to widen integers, but we should never truncate.
4004  if (ArgInfo.getCoerceToType() != V->getType() &&
4005  V->getType()->isIntegerTy())
4006  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4007 
4008  // If the argument doesn't match, perform a bitcast to coerce it. This
4009  // can happen due to trivial type mismatches.
4010  if (FirstIRArg < IRFuncTy->getNumParams() &&
4011  V->getType() != IRFuncTy->getParamType(FirstIRArg))
4012  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4013 
4014  IRCallArgs[FirstIRArg] = V;
4015  break;
4016  }
4017 
4018  // FIXME: Avoid the conversion through memory if possible.
4019  Address Src = Address::invalid();
4020  if (!I->isAggregate()) {
4021  Src = CreateMemTemp(I->Ty, "coerce");
4022  I->copyInto(*this, Src);
4023  } else {
4024  Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4025  : I->getKnownRValue().getAggregateAddress();
4026  }
4027 
4028  // If the value is offset in memory, apply the offset now.
4029  Src = emitAddressAtOffset(*this, Src, ArgInfo);
4030 
4031  // Fast-isel and the optimizer generally like scalar values better than
4032  // FCAs, so we flatten them if this is safe to do for this argument.
4033  llvm::StructType *STy =
4034  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4035  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4036  llvm::Type *SrcTy = Src.getType()->getElementType();
4037  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4038  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4039 
4040  // If the source type is smaller than the destination type of the
4041  // coerce-to logic, copy the source value into a temp alloca the size
4042  // of the destination type to allow loading all of it. The bits past
4043  // the source value are left undef.
4044  if (SrcSize < DstSize) {
4045  Address TempAlloca
4046  = CreateTempAlloca(STy, Src.getAlignment(),
4047  Src.getName() + ".coerce");
4048  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4049  Src = TempAlloca;
4050  } else {
4051  Src = Builder.CreateBitCast(Src,
4052  STy->getPointerTo(Src.getAddressSpace()));
4053  }
4054 
4055  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
4056  assert(NumIRArgs == STy->getNumElements());
4057  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4058  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
4059  Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
4060  llvm::Value *LI = Builder.CreateLoad(EltPtr);
4061  IRCallArgs[FirstIRArg + i] = LI;
4062  }
4063  } else {
4064  // In the simple case, just pass the coerced loaded value.
4065  assert(NumIRArgs == 1);
4066  IRCallArgs[FirstIRArg] =
4067  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4068  }
4069 
4070  break;
4071  }
4072 
4073  case ABIArgInfo::CoerceAndExpand: {
4074  auto coercionType = ArgInfo.getCoerceAndExpandType();
4075  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4076 
4077  llvm::Value *tempSize = nullptr;
4078  Address addr = Address::invalid();
4079  Address AllocaAddr = Address::invalid();
4080  if (I->isAggregate()) {
4081  addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4082  : I->getKnownRValue().getAggregateAddress();
4083 
4084  } else {
4085  RValue RV = I->getKnownRValue();
4086  assert(RV.isScalar()); // complex should always just be direct
4087 
4088  llvm::Type *scalarType = RV.getScalarVal()->getType();
4089  auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4090  auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4091 
4092  // Materialize to a temporary.
4093  addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4094  CharUnits::fromQuantity(std::max(
4095  layout->getAlignment(), scalarAlign)),
4096  "tmp",
4097  /*ArraySize=*/nullptr, &AllocaAddr);
4098  tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4099 
4100  Builder.CreateStore(RV.getScalarVal(), addr);
4101  }
4102 
4103  addr = Builder.CreateElementBitCast(addr, coercionType);
4104 
4105  unsigned IRArgPos = FirstIRArg;
4106  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4107  llvm::Type *eltType = coercionType->getElementType(i);
4108  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4109  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4110  llvm::Value *elt = Builder.CreateLoad(eltAddr);
4111  IRCallArgs[IRArgPos++] = elt;
4112  }
4113  assert(IRArgPos == FirstIRArg + NumIRArgs);
4114 
4115  if (tempSize) {
4116  EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4117  }
4118 
4119  break;
4120  }
4121 
4122  case ABIArgInfo::Expand:
4123  unsigned IRArgPos = FirstIRArg;
4124  ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4125  assert(IRArgPos == FirstIRArg + NumIRArgs);
4126  break;
4127  }
4128  }
4129 
4130  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4131  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4132 
4133  // If we're using inalloca, set up that argument.
4134  if (ArgMemory.isValid()) {
4135  llvm::Value *Arg = ArgMemory.getPointer();
4136  if (CallInfo.isVariadic()) {
4137  // When passing non-POD arguments by value to variadic functions, we will
4138  // end up with a variadic prototype and an inalloca call site. In such
4139  // cases, we can't do any parameter mismatch checks. Give up and bitcast
4140  // the callee.
4141  unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4142  auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4143  CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4144  } else {
4145  llvm::Type *LastParamTy =
4146  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4147  if (Arg->getType() != LastParamTy) {
4148 #ifndef NDEBUG
4149  // Assert that these structs have equivalent element types.
4150  llvm::StructType *FullTy = CallInfo.getArgStruct();
4151  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4152  cast<llvm::PointerType>(LastParamTy)->getElementType());
4153  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4154  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4155  DE = DeclaredTy->element_end(),
4156  FI = FullTy->element_begin();
4157  DI != DE; ++DI, ++FI)
4158  assert(*DI == *FI);
4159 #endif
4160  Arg = Builder.CreateBitCast(Arg, LastParamTy);
4161  }
4162  }
4163  assert(IRFunctionArgs.hasInallocaArg());
4164  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4165  }
4166 
4167  // 2. Prepare the function pointer.
4168 
4169  // If the callee is a bitcast of a non-variadic function to have a
4170  // variadic function pointer type, check to see if we can remove the
4171  // bitcast. This comes up with unprototyped functions.
4172  //
4173  // This makes the IR nicer, but more importantly it ensures that we
4174  // can inline the function at -O0 if it is marked always_inline.
4175  auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4176  llvm::FunctionType *CalleeFT =
4177  cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4178  if (!CalleeFT->isVarArg())
4179  return Ptr;
4180 
4181  llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4182  if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4183  return Ptr;
4184 
4185  llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4186  if (!OrigFn)
4187  return Ptr;
4188 
4189  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4190 
4191  // If the original type is variadic, or if any of the component types
4192  // disagree, we cannot remove the cast.
4193  if (OrigFT->isVarArg() ||
4194  OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4195  OrigFT->getReturnType() != CalleeFT->getReturnType())
4196  return Ptr;
4197 
4198  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4199  if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4200  return Ptr;
4201 
4202  return OrigFn;
4203  };
4204  CalleePtr = simplifyVariadicCallee(CalleePtr);
4205 
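// Illustrative sketch (hypothetical code, not part of CGCall.cpp): the bitcast
// removed above typically comes from calling through an unprototyped C
// declaration whose definition is known:
//
//   void f();                // unprototyped declaration
//   void g(void) { f(42); }  // call site sees a variadic-looking callee
//   void f(int x) {}         // definition: void (i32) in IR
//
// Stripping the cast lets an always_inline f() be inlined even at -O0.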
4206  // 3. Perform the actual call.
4207 
4208  // Deactivate any cleanups that we're supposed to do immediately before
4209  // the call.
4210  if (!CallArgs.getCleanupsToDeactivate().empty())
4211  deactivateArgCleanupsBeforeCall(*this, CallArgs);
4212 
4213  // Assert that the arguments we computed match up. The IR verifier
4214  // will catch this, but this is a common enough source of problems
4215  // during IRGen changes that it's way better for debugging to catch
4216  // it ourselves here.
4217 #ifndef NDEBUG
4218  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4219  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4220  // Inalloca argument can have different type.
4221  if (IRFunctionArgs.hasInallocaArg() &&
4222  i == IRFunctionArgs.getInallocaArgNo())
4223  continue;
4224  if (i < IRFuncTy->getNumParams())
4225  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4226  }
4227 #endif
4228 
4229  // Compute the calling convention and attributes.
4230  unsigned CallingConv;
4231  llvm::AttributeList Attrs;
4232  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4233  Callee.getAbstractInfo(), Attrs, CallingConv,
4234  /*AttrOnCallSite=*/true);
4235 
4236  // Apply some call-site-specific attributes.
4237  // TODO: work this into building the attribute set.
4238 
4239  // Apply always_inline to all calls within flatten functions.
4240  // FIXME: should this really take priority over __try, below?
4241  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4242  !(Callee.getAbstractInfo().getCalleeDecl() &&
4243  Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
4244  Attrs =
4245  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4246  llvm::Attribute::AlwaysInline);
4247  }
4248 
4249  // Disable inlining inside SEH __try blocks.
4250  if (isSEHTryScope()) {
4251  Attrs =
4252  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4253  llvm::Attribute::NoInline);
4254  }
4255 
4256  // Decide whether to use a call or an invoke.
4257  bool CannotThrow;
4258  if (currentFunctionUsesSEHTry()) {
4259  // SEH cares about asynchronous exceptions, so everything can "throw."
4260  CannotThrow = false;
4261  } else if (isCleanupPadScope() &&
4262  EHPersonality::get(*this).isMSVCXXPersonality()) {
4263  // The MSVC++ personality will implicitly terminate the program if an
4264  // exception is thrown during a cleanup outside of a try/catch.
4265  // We don't need to model anything in IR to get this behavior.
4266  CannotThrow = true;
4267  } else {
4268  // Otherwise, nounwind call sites will never throw.
4269  CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4270  llvm::Attribute::NoUnwind);
4271  }
4272 
4273  // If we made a temporary, be sure to clean up after ourselves. Note that we
4274  // can't depend on being inside of an ExprWithCleanups, so we need to manually
4275  // pop this cleanup later on. Being eager about this is OK, since this
4276  // temporary is 'invisible' outside of the callee.
4277  if (UnusedReturnSizePtr)
4278  pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4279  UnusedReturnSizePtr);
4280 
4281  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4282 
4283  SmallVector<llvm::OperandBundleDef, 1> BundleList =
4284  getBundlesForFunclet(CalleePtr);
4285 
4286  // Emit the actual call/invoke instruction.
4287  llvm::CallSite CS;
4288  if (!InvokeDest) {
4289  CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4290  } else {
4291  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4292  CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4293  BundleList);
4294  EmitBlock(Cont);
4295  }
4296  llvm::Instruction *CI = CS.getInstruction();
4297  if (callOrInvoke)
4298  *callOrInvoke = CI;
4299 
4300  // Apply the attributes and calling convention.
4301  CS.setAttributes(Attrs);
4302  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4303 
4304  // Apply various metadata.
4305 
4306  if (!CI->getType()->isVoidTy())
4307  CI->setName("call");
4308 
4309  // Insert instrumentation or attach profile metadata at indirect call sites.
4310  // For more details, see the comment before the definition of
4311  // IPVK_IndirectCallTarget in InstrProfData.inc.
4312  if (!CS.getCalledFunction())
4313  PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4314  CI, CalleePtr);
4315 
4316  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4317  // optimizer it can aggressively ignore unwind edges.
4318  if (CGM.getLangOpts().ObjCAutoRefCount)
4319  AddObjCARCExceptionMetadata(CI);
4320 
4321  // Suppress tail calls if requested.
4322  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4323  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4324  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4325  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4326  }
4327 
4328  // 4. Finish the call.
4329 
4330  // If the call doesn't return, finish the basic block and clear the
4331  // insertion point; this allows the rest of IRGen to discard
4332  // unreachable code.
4333  if (CS.doesNotReturn()) {
4334  if (UnusedReturnSizePtr)
4335  PopCleanupBlock();
4336 
4337  // Strip away the noreturn attribute to better diagnose unreachable UB.
4338  if (SanOpts.has(SanitizerKind::Unreachable)) {
4339  if (auto *F = CS.getCalledFunction())
4340  F->removeFnAttr(llvm::Attribute::NoReturn);
4341  CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4342  llvm::Attribute::NoReturn);
4343  }
4344 
4345  EmitUnreachable(Loc);
4346  Builder.ClearInsertionPoint();
4347 
4348  // FIXME: For now, emit a dummy basic block because expr emitters in
4349  // generally are not ready to handle emitting expressions at unreachable
4350  // points.
4351  EnsureInsertPoint();
4352 
4353  // Return a reasonable RValue.
4354  return GetUndefRValue(RetTy);
4355  }
4356 
4357  // Perform the swifterror writeback.
4358  if (swiftErrorTemp.isValid()) {
4359  llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4360  Builder.CreateStore(errorResult, swiftErrorArg);
4361  }
4362 
4363  // Emit any call-associated writebacks immediately. Arguably this
4364  // should happen after any return-value munging.
4365  if (CallArgs.hasWritebacks())
4366  emitWritebacks(*this, CallArgs);
4367 
4368  // The stack cleanup for inalloca arguments has to run out of the normal
4369  // lexical order, so deactivate it and run it manually here.
4370  CallArgs.freeArgumentMemory(*this);
4371 
4372  // Extract the return value.
4373  RValue Ret = [&] {
4374  switch (RetAI.getKind()) {
4375  case ABIArgInfo::CoerceAndExpand: {
4376  auto coercionType = RetAI.getCoerceAndExpandType();
4377  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4378 
4379  Address addr = SRetPtr;
4380  addr = Builder.CreateElementBitCast(addr, coercionType);
4381 
4382  assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4383  bool requiresExtract = isa<llvm::StructType>(CI->getType());
4384 
4385  unsigned unpaddedIndex = 0;
4386  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4387  llvm::Type *eltType = coercionType->getElementType(i);
4388  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4389  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4390  llvm::Value *elt = CI;
4391  if (requiresExtract)
4392  elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4393  else
4394  assert(unpaddedIndex == 0);
4395  Builder.CreateStore(elt, eltAddr);
4396  }
4397  // FALLTHROUGH
4398  LLVM_FALLTHROUGH;
4399  }
4400 
4401  case ABIArgInfo::InAlloca:
4402  case ABIArgInfo::Indirect: {
4403  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4404  if (UnusedReturnSizePtr)
4405  PopCleanupBlock();
4406  return ret;
4407  }
4408 
4409  case ABIArgInfo::Ignore:
4410  // If we are ignoring an argument that had a result, make sure to
4411  // construct the appropriate return value for our caller.
4412  return GetUndefRValue(RetTy);
4413 
4414  case ABIArgInfo::Extend:
4415  case ABIArgInfo::Direct: {
4416  llvm::Type *RetIRTy = ConvertType(RetTy);
4417  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4418  switch (getEvaluationKind(RetTy)) {
4419  case TEK_Complex: {
4420  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4421  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4422  return RValue::getComplex(std::make_pair(Real, Imag));
4423  }
4424  case TEK_Aggregate: {
4425  Address DestPtr = ReturnValue.getValue();
4426  bool DestIsVolatile = ReturnValue.isVolatile();
4427 
4428  if (!DestPtr.isValid()) {
4429  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4430  DestIsVolatile = false;
4431  }
4432  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4433  return RValue::getAggregate(DestPtr);
4434  }
4435  case TEK_Scalar: {
4436  // If the argument doesn't match, perform a bitcast to coerce it. This
4437  // can happen due to trivial type mismatches.
4438  llvm::Value *V = CI;
4439  if (V->getType() != RetIRTy)
4440  V = Builder.CreateBitCast(V, RetIRTy);
4441  return RValue::get(V);
4442  }
4443  }
4444  llvm_unreachable("bad evaluation kind");
4445  }
4446 
4447  Address DestPtr = ReturnValue.getValue();
4448  bool DestIsVolatile = ReturnValue.isVolatile();
4449 
4450  if (!DestPtr.isValid()) {
4451  DestPtr = CreateMemTemp(RetTy, "coerce");
4452  DestIsVolatile = false;
4453  }
4454 
4455  // If the value is offset in memory, apply the offset now.
4456  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4457  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4458 
4459  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4460  }
4461 
4462  case ABIArgInfo::Expand:
4463  llvm_unreachable("Invalid ABI kind for return argument");
4464  }
4465 
4466  llvm_unreachable("Unhandled ABIArgInfo::Kind");
4467  } ();
4468 
4469  // Emit the assume_aligned check on the return value.
4470  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4471  if (Ret.isScalar() && TargetDecl) {
4472  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4473  llvm::Value *OffsetValue = nullptr;
4474  if (const auto *Offset = AA->getOffset())
4475  OffsetValue = EmitScalarExpr(Offset);
4476 
4477  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4478  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4479  EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4480  OffsetValue);
4481  } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4482  llvm::Value *ParamVal =
4483  CallArgs[AA->getParamIndex().getLLVMIndex()].getRValue(
4484  *this).getScalarVal();
4485  EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4486  }
4487  }
4488 
4489  return Ret;
4490 }
4491 
4492 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4493  if (isVirtual()) {
4494  const CallExpr *CE = getVirtualCallExpr();
4495  return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4496  CGF, getVirtualMethodDecl(), getThisAddress(),
4497  getFunctionType(), CE ? CE->getLocStart() : SourceLocation());
4498  }
4499 
4500  return *this;
4501 }
4502 
4503 /* VarArg handling */
4504 
4505 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4506  VAListAddr = VE->isMicrosoftABI()
4507  ? EmitMSVAListRef(VE->getSubExpr())
4508  : EmitVAListRef(VE->getSubExpr());
4509  QualType Ty = VE->getType();
4510  if (VE->isMicrosoftABI())
4511  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4512  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4513 }
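// Illustrative sketch (hypothetical code, not part of CGCall.cpp): the source
// construct this lowers is the va_arg expression:
//
//   int first(int n, ...) {
//     __builtin_va_list ap;
//     __builtin_va_start(ap, n);
//     int v = __builtin_va_arg(ap, int);   // lowered through EmitVAArg
//     __builtin_va_end(ap);
//     return v;
//   }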
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:653
const llvm::DataLayout & getDataLayout() const
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
Definition: ExprObjC.h:1517
CGCXXABI & getCXXABI() const
Definition: CodeGenTypes.h:177
Ignore - Ignore the argument (treat as void).
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Definition: CGCall.h:361
ParameterABI getABI() const
Return the ABI treatment of this parameter.
Definition: Type.h:3403
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Represents a function declaration or definition.
Definition: Decl.h:1714
Address getAddress() const
Definition: CGValue.h:569
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
Definition: CGCall.cpp:627
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
Definition: CGCall.cpp:2962
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2375
Complete object ctor.
Definition: ABI.h:26
CanQualType VoidPtrTy
Definition: ASTContext.h:1025
A (possibly-)qualified type.
Definition: Type.h:655
bool isBlockPointerType() const
Definition: Type.h:6057
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses &#39;sret&#39; when used as a return type.
Definition: CGCall.cpp:1500
bool getNoCfCheck() const
Definition: Type.h:3222
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
Definition: CGCall.cpp:259
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
Definition: CGCall.cpp:79
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition: CGExpr.cpp:139
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
Definition: CGCall.cpp:3104
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign)
Create a temporary allocation for the purposes of coercion.
Definition: CGCall.cpp:1112
CXXDtorType getDtorType() const
Definition: GlobalDecl.h:71
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
Definition: CGCall.cpp:2696
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
Definition: CGCall.cpp:562
const ABIInfo & getABIInfo() const
Definition: CodeGenTypes.h:175
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3148
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty, const FunctionDecl *FD)
Arrange the argument and result information for a value of the given freestanding function type...
Definition: CGCall.cpp:187
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:460
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:935
const Decl * getCalleeDecl() const
Definition: CGCall.h:63
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type...
Definition: Type.h:3764
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
Definition: Format.cpp:2043
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:826
Extend - Valid only for integer argument types.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:992
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition: CGCall.cpp:4505
static bool isProvablyNull(llvm::Value *addr)
Definition: CGCall.cpp:3099
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
Definition: CGCall.cpp:242
bool isVirtual() const
Definition: DeclCXX.h:2076
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Definition: CGCall.cpp:4492
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
const Expr * getSubExpr() const
Definition: Expr.h:3876
void addUncopiedAggregate(LValue LV, QualType type)
Definition: CGCall.h:287
bool isVolatile() const
Definition: CGValue.h:301
The base class of the type hierarchy.
Definition: Type.h:1421
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:1865
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
Definition: CGCall.cpp:2165
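Both attribute spellings the helper looks for, shown on hypothetical declarations:

    // Function-level form: nonnull names 1-based argument indices.
    __attribute__((nonnull(1))) void copy_name(char *dst, const char *src);

    // Parameter-level form: the attribute attaches to one parameter.
    void use_buffer(char *buf __attribute__((nonnull)));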
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
Definition: Type.h:5889
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
static int getExpansionSize(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:958
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:672
const ParmVarDecl * getParamDecl(unsigned I) const
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i...
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, ArrayRef< llvm::Value *> Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3743
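The call-vs-invoke choice it encapsulates, sketched standalone against llvm::IRBuilder (a simplification; the real method consults the function's EH state for the unwind destination, and all names below are illustrative):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/IR/IRBuilder.h"

    // Emit a plain call when there is nothing to unwind to, otherwise an
    // invoke that branches to Cont normally and to InvokeDest on unwind.
    llvm::Instruction *emitCallOrInvoke(llvm::IRBuilder<> &B,
                                        llvm::Value *Callee,
                                        llvm::ArrayRef<llvm::Value *> Args,
                                        llvm::BasicBlock *InvokeDest,
                                        llvm::BasicBlock *Cont) {
      if (!InvokeDest)
        return B.CreateCall(Callee, Args);
      return B.CreateInvoke(Callee, Cont, InvokeDest, Args);
    }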
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
Definition: CGObjC.cpp:1965
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:372
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2463
virtual AddedStructorArgs buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters...
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
Definition: CGCall.cpp:2145
bool hasWritebacks() const
Definition: CGCall.h:312
Default closure variant of a ctor.
Definition: ABI.h:30
ExtParameterInfo withIsNoEscape(bool NoEscape) const
Definition: Type.h:3440
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::Instruction **callOrInvoke, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
Definition: CGCall.cpp:3773
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
Represents a variable declaration or definition.
Definition: Decl.h:812
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:104
llvm::Instruction * getStackBase() const
Definition: CGCall.h:334
unsigned getNumParams() const
Definition: Type.h:3605
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
llvm::Value * getFunctionPointer() const
Definition: CGCall.h:178
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load of type Ty from Src, coercing if Src's element type differs.
Definition: CGCall.cpp:1216
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:6456
void setCoerceToType(llvm::Type *T)
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:3251
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:139
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition: CGCall.cpp:3342
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:234
llvm::Value * getPointer() const
Definition: Address.h:38
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:300
Address getValue() const
Definition: CGCall.h:381
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
Represents a parameter to a function.
Definition: Decl.h:1533
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:57
void add(RValue rvalue, QualType type)
Definition: CGCall.h:285
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
Definition: CGCall.cpp:46
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Definition: TargetInfo.cpp:420
Represents a struct/union/class.
Definition: Decl.h:3548
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3334
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:339
An object to manage conditionally-evaluated expressions.
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2437
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1004
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:3170
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2776
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.h:3706
Address getAddress() const
Definition: CGValue.h:327
unsigned getRegParm() const
Definition: Type.h:3225
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:150
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:3765
llvm::Constant * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
field_range fields() const
Definition: Decl.h:3764
bool isVolatileQualified() const
Definition: CGValue.h:258
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2198
Represents a member of a struct/union/class.
Definition: Decl.h:2521
CharUnits getAlignment() const
Definition: CGValue.h:316
RequiredArgs getRequiredArgs() const
bool isUsingInAlloca() const
Returns whether we're using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:339
unsigned getFunctionScopeIndex() const
Returns the index of this parameter in its prototype or method scope.
Definition: Decl.h:1586
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:104
llvm::CallInst * EmitRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isOrdinary() const
Definition: CGCall.h:169
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:266
CharUnits getArgStructAlignment() const
bool isReferenceType() const
Definition: Type.h:6061
Interesting information about a specific parameter that can't simply be reflected in the parameter's type...
Definition: Type.h:3390
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:81
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2188
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that&#39;s being passed call-by-writeback.
Definition: CGCall.cpp:3198
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
bool isVirtual() const
Definition: CGCall.h:187
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:321
bool getProducesResult() const
Definition: Type.h:3220
llvm::FunctionType * getFunctionType() const
Definition: CGCall.h:203
bool isGLValue() const
Definition: Expr.h:252
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:285
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment...
static bool hasScalarEvaluationKind(QualType T)
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2569
void copyInto(CodeGenFunction &CGF, Address A) const
Definition: CGCall.cpp:3531
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:157
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
uint32_t Offset
Definition: CacheTokens.cpp:43
llvm::StructType * getCoerceAndExpandType() const
bool hasConstructorVariants() const
Does this ABI have different entrypoints for complete-object and base-subobject constructors?
Definition: TargetCXXABI.h:215
Wrapper for source info for functions.
Definition: TypeLoc.h:1396
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:67
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:109
unsigned getInAllocaFieldIndex() const
const_arg_iterator arg_begin() const
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:66
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:390
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:1814
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:259
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:134
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
Definition: CGCall.cpp:468
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
Definition: CGCall.cpp:702
Values of this type can never be null.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:85
bool isSimple() const
Definition: CGValue.h:252
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:273
bool isInstance() const
Definition: DeclCXX.h:2059
An ordinary object is located at an address in memory.
Definition: Specifiers.h:126
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1663
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:106
FunctionType::ExtInfo getExtInfo() const
QualType getReturnType() const
Definition: DeclObjC.h:361
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:737
bool getNoReturn() const
Definition: Type.h:3219
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:84
Address getAggregateAddress() const
getAggregateAddress() - Return the Address of the aggregate.
Definition: CGValue.h:71
bool getNoCallerSavedRegs() const
Definition: Type.h:3221
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
Definition: CGCall.cpp:3548
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:510
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD)
Derives the 'this' type for codegen purposes, i.e.
Definition: CGCall.cpp:73
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Definition: TargetInfo.h:305
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:3278
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
Definition: CGCall.cpp:499
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
Definition: Type.h:3334
bool hasAttr() const
Definition: DeclBase.h:536
CanQualType getReturnType() const
Const iterator for iterating over Stmt * arrays that contain only Expr *.
Definition: Stmt.h:357
bool isValid() const
Definition: Address.h:36
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1590
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3369
llvm::CallInst * EmitNounwindRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:3881
const TargetCodeGenInfo & getTargetCodeGenInfo()
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:39
writeback_const_range writebacks() const
Definition: CGCall.h:317
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:306
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
Definition: CGCall.cpp:3047
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:3858
Address Temporary
The temporary alloca.
Definition: CGCall.h:271
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
Definition: CGCXXABI.h:107
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:274
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:3026
Expr - This represents one expression.
Definition: Expr.h:106
bool isVariadic() const
Whether this function is variadic.
Definition: Decl.cpp:2620
static Address invalid()
Definition: Address.h:35
llvm::Type * getUnpaddedCoerceAndExpandType() const
const FunctionProtoType * T
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:3021
bool useObjCFPRetForRealType(RealType T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
Definition: TargetInfo.h:691
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:88
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:66
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
Definition: CGCall.cpp:695
bool getHasRegParm() const
Definition: Type.h:3223
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:6519
bool isObjCRetainableType() const
Definition: Type.cpp:3894
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2686
llvm::Constant * objc_retain
id objc_retain(id);
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:44
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:356
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2557
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition: CGCall.cpp:3673
A class for recording the number of arguments that a function signature requires. ...
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type...
Definition: CGCall.cpp:1505
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:640
QualType getType() const
Definition: Expr.h:128
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1346
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes &#39;this&#39; as the first parameter followed by varargs.
Definition: CGCall.cpp:529
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2714
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:197
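A minimal sketch of these CharUnits operations (assumes clang's CharUnits.h; the quantities are invented):

    #include "clang/AST/CharUnits.h"

    // An 8-byte-aligned array of 12-byte elements: element 1 starts at
    // offset 12, so arbitrary elements are only guaranteed 4-byte alignment.
    clang::CharUnits minElementAlign() {
      clang::CharUnits ArrayAlign = clang::CharUnits::fromQuantity(8);
      clang::CharUnits ElemSize = clang::CharUnits::fromQuantity(12);
      return ArrayAlign.alignmentOfArrayElement(ElemSize); // 4 bytes
    }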
llvm::PointerType * AllocaInt8PtrTy
void Profile(llvm::FoldingSetNodeID &ID)
UnaryOperator - This represents the unary-expression's (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1782
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:2050
ASTContext & getContext() const
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:444
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1162
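The underlying pattern, sketched standalone with llvm::IRBuilder (the real helper at line 1162 additionally accounts for pointer widths and address spaces through CGF; the function name here is invented):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/IRBuilder.h"

    // Coerce Val (an integer or pointer) to DstTy (also integer or pointer).
    llvm::Value *coerceIntOrPtr(llvm::IRBuilder<> &B, llvm::Value *Val,
                                llvm::Type *DstTy, const llvm::DataLayout &DL) {
      if (Val->getType() == DstTy)
        return Val;
      if (Val->getType()->isPointerTy())
        Val = B.CreatePtrToInt(Val, DL.getIntPtrType(Val->getType()));
      if (DstTy->isPointerTy())
        return B.CreateIntToPtr(Val, DstTy);
      return B.CreateIntCast(Val, DstTy, /*isSigned=*/false); // trunc/zext
    }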
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:236
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:35
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1268
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:412
CanQualType getCanonicalTypeUnqualified() const
LValue getKnownLValue() const
Definition: CGCall.h:240
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Definition: Type.h:4082
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store of Src to the address Dst, coercing between the two types when they differ.
Definition: CGCall.cpp:1293
Enumerates target-specific builtins in their own namespaces within namespace clang.
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:506
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:142
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:182
Kind
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
Definition: CGCall.cpp:1510
CanProxy< U > castAs() const
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:3187
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a null pointer constant...
Definition: Expr.cpp:3324
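For reference, hypothetical C++ exercising expressions this predicate accepts:

    void take(char *p); // hypothetical

    void calls() {
      take(0);       // the integer literal 0 is a null pointer constant
      take(nullptr); // so is the nullptr literal (C++11)
    }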
Encodes a location in the source.
QualType getReturnType() const
Definition: Type.h:3302
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2072
A saved depth on the scope stack.
Definition: EHScopeStack.h:107
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:290
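The construct in question, with hypothetical names:

    struct Base {
      explicit Base(int n);
    };
    struct Derived : Base {
      using Base::Base; // C++11 inheriting constructor; the predicate above
                        // decides whether it repeats Base's parameters
    };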
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition: CGCall.cpp:3732
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1235
CallingConv getCC() const
Definition: Type.h:3232
const Decl * getDecl() const
Definition: GlobalDecl.h:64
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
Definition: ASTContext.h:1832
An aggregate value slot.
Definition: CGValue.h:437
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
Definition: CGCall.cpp:455
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2031
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2354
const_arg_iterator arg_end() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
ObjCEntrypoints & getObjCEntrypoints() const
CoerceAndExpand - Only valid for aggregate argument types.
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:3326
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:713
CanQualType VoidTy
Definition: ASTContext.h:997
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
bool isAnyPointerType() const
Definition: Type.h:6053
An aligned address.
Definition: Address.h:25
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after...
Definition: Type.h:1166
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Definition: TargetInfo.h:697
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:178
All available information about a concrete callee.
Definition: CGCall.h:67
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:364
Complete object dtor.
Definition: ABI.h:36
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
Definition: CGCall.cpp:1527
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1688
bool hasFlexibleArrayMember() const
Definition: Decl.h:3639
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3640
CXXCtorType
C++ constructor types.
Definition: ABI.h:25
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
Definition: CGCall.cpp:620
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:543
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1126
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:356
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:59
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
const TargetInfo & getTarget() const
Definition: CodeGenTypes.h:176
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2363
ExtInfo getExtInfo() const
Definition: Type.h:3313
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:93
CodeGenFunction::ComplexPairTy ComplexPairTy
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
Definition: CGBuilder.h:172
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:92
LValue Source
The original argument.
Definition: CGCall.h:268
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type...
Definition: CGCall.cpp:431
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args)
Emits a call or invoke to the given noreturn runtime function.
Definition: CGCall.cpp:3701
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:91
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:1000
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP, const FunctionDecl *FD)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:167
void EmitARCIntrinsicUse(ArrayRef< llvm::Value *> values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition: CGObjC.cpp:1806
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:70
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2151
SourceLocation getLocStart() const LLVM_READONLY
Definition: Decl.h:738
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
RValue getRValue(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3521
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:796
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:108
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
Definition: TargetInfo.cpp:401
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
uint64_t SanitizerMask
Definition: Sanitizers.h:26
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:4072
Complex values, per C99 6.2.5p11.
Definition: Type.h:2315
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:77
static bool classof(const OMPClause *T)
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:1983
QualType getCanonicalTypeInternal() const
Definition: Type.h:2199
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:6304
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
Definition: CGCall.cpp:2657
CharUnits getIndirectAlign() const
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:44
T * getAttr() const
Definition: DeclBase.h:532
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:52
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:645
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:120
SourceLocation getLocStart() const LLVM_READONLY
Definition: Expr.cpp:1357
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:62
Expand - Only valid for aggregate argument types.
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:2511
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type *>::iterator &TI)
getExpandedTypes - Expand the type Ty into the sequence of LLVM types it is passed as.
Definition: CGCall.cpp:978
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:903
bool isParamDestroyedInCallee() const
Definition: Decl.h:3711
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:445
Represents a base class of a C++ class.
Definition: DeclCXX.h:192
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2040
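Note the unit is bits. A trivial hedged sketch (sizeInBits is an invented wrapper):

    #include "clang/AST/ASTContext.h"
    #include <cstdint>

    // Size of T in bits (typically 32 for 'int'); for bytes, see
    // ASTContext::getTypeSizeInChars.
    uint64_t sizeInBits(const clang::ASTContext &Ctx, clang::QualType T) {
      return Ctx.getTypeSize(T);
    }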
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:1993
ASTContext & getContext() const
Definition: CodeGenTypes.h:174
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:134
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:518
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl...
Definition: CGCall.cpp:1672
LangAS getAddressSpace() const
Definition: CGValue.h:314
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
RValue getKnownRValue() const
Definition: CGCall.h:244
Represents a C++ struct/union/class.
Definition: DeclCXX.h:300
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(const CXXMethodDecl *MD)
Get the type of the implicit "this" parameter used by a method.
Definition: CGCXXABI.h:338
bool isVoidType() const
Definition: Type.h:6276
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2203
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:5852
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1199
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like 'int'.