//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

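// For reference, a declaration such as
//   void __attribute__((fastcall)) f(int x);
// picks up llvm::CallingConv::X86_FastCall here and is printed in IR roughly
// as:
//   define x86_fastcallcc void @f(i32 inreg %x)
// (illustrative; the exact parameter attributes depend on the target ABI).
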
/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

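// On return, paramInfos covers all totalArgs slots, laid out as
//   [prefix args][prototype params (+ trailing pass_object_size slots)]
//   [variadic/suffix args]
// with default-constructed infos everywhere outside the prototype.
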
/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

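// For example, a parameter declared as
//   void f(void *p __attribute__((pass_object_size(0))));
// contributes two entries to the prefix: the pointer itself plus an implicit
// size_t carrying the object size, which is why the loop above can push two
// types per formal parameter.
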
/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

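// For example, `void __stdcall f(int);` yields CC_X86StdCall here, which
// ClangCallConvToLLVMCallConv above then maps to
// llvm::CallingConv::X86_StdCall.
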
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(MD, Type, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

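// Note that on ABIs where constructors return 'this' (HasThisReturn, e.g.
// ARM), resultType above becomes the 'this' pointer type instead of void, so
// a constructor's IR signature is roughly %struct.S* (%struct.S*, ...).
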
static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

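// For example, calling an unprototyped `int f();` with extra arguments is
// valid C; on targets whose isNoProtoCallVariadic() returns true (e.g. some
// AAPCS configurations), all supplied arguments are treated as required so
// the call lowers compatibly with a possibly-variadic definition.
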
/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

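// Note: CGFunctionInfo nodes are uniqued in the FunctionInfos FoldingSet, so
// arranging the same signature twice returns the same cached object; the
// FunctionsBeingProcessed set only guards against recursively re-entering
// ABI computation for a signature that is still being constructed.
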
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

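// For example (no bit-fields, non-dynamic class):
//   struct P { int x; float y; };   // expands to (int, float)
//   int a[3];                       // expands to (int, int, int)
//   _Complex double c;              // expands to (double, double)
// while a plain scalar such as int is left unexpanded (TEK_None).
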
static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

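// For example, coercing an i64 that holds the in-memory image of an i32 down
// to i32 on a big-endian target first shifts right by 32, because the value
// lives in the high bits; on a little-endian target a plain truncation of the
// low bits is enough.
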
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.AllocaInt8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

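// For example, loading a { i32, i32 } source as an i64 takes the
// SrcSize >= DstSize bitcast path above; if the source is smaller than the
// destination, the memcpy-through-temporary path leaves the extra destination
// bits undefined, as documented.
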
// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.AllocaInt8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

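// For example, for a function whose aggregate return is passed indirectly
// (sret) and whose one aggregate parameter is flattened into two scalars, the
// mapping is: IR arg 0 = sret pointer, IR args 1-2 = Clang arg 0, and
// totalIRArgs() == 3; padding, inalloca, and expanded arguments shift the
// indices the same way.
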
/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  const auto &RI = FI.getReturnInfo();
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

1543 llvm::FunctionType *
1545 
1546  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1547  (void)Inserted;
1548  assert(Inserted && "Recursively being processed?");
1549 
1550  llvm::Type *resultType = nullptr;
1551  const ABIArgInfo &retAI = FI.getReturnInfo();
1552  switch (retAI.getKind()) {
1553  case ABIArgInfo::Expand:
1554  llvm_unreachable("Invalid ABI kind for return argument");
1555 
1556  case ABIArgInfo::Extend:
1557  case ABIArgInfo::Direct:
1558  resultType = retAI.getCoerceToType();
1559  break;
1560 
1561  case ABIArgInfo::InAlloca:
1562  if (retAI.getInAllocaSRet()) {
1563  // sret things on win32 aren't void, they return the sret pointer.
1564  QualType ret = FI.getReturnType();
1565  llvm::Type *ty = ConvertType(ret);
1566  unsigned addressSpace = Context.getTargetAddressSpace(ret);
1567  resultType = llvm::PointerType::get(ty, addressSpace);
1568  } else {
1569  resultType = llvm::Type::getVoidTy(getLLVMContext());
1570  }
1571  break;
1572 
1573  case ABIArgInfo::Indirect:
1574  case ABIArgInfo::Ignore:
1575  resultType = llvm::Type::getVoidTy(getLLVMContext());
1576  break;
1577 
1579  resultType = retAI.getUnpaddedCoerceAndExpandType();
1580  break;
1581  }
1582 
1583  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1584  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1585 
1586  // Add type for sret argument.
1587  if (IRFunctionArgs.hasSRetArg()) {
1588  QualType Ret = FI.getReturnType();
1589  llvm::Type *Ty = ConvertType(Ret);
1590  unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1591  ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1592  llvm::PointerType::get(Ty, AddressSpace);
1593  }
1594 
1595  // Add type for inalloca argument.
1596  if (IRFunctionArgs.hasInallocaArg()) {
1597  auto ArgStruct = FI.getArgStruct();
1598  assert(ArgStruct);
1599  ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1600  }
1601 
1602  // Add in all of the required arguments.
1603  unsigned ArgNo = 0;
1605  ie = it + FI.getNumRequiredArgs();
1606  for (; it != ie; ++it, ++ArgNo) {
1607  const ABIArgInfo &ArgInfo = it->info;
1608 
1609  // Insert a padding type to ensure proper alignment.
1610  if (IRFunctionArgs.hasPaddingArg(ArgNo))
1611  ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1612  ArgInfo.getPaddingType();
1613 
1614  unsigned FirstIRArg, NumIRArgs;
1615  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1616 
1617  switch (ArgInfo.getKind()) {
1618  case ABIArgInfo::Ignore:
1619  case ABIArgInfo::InAlloca:
1620  assert(NumIRArgs == 0);
1621  break;
1622 
1623  case ABIArgInfo::Indirect: {
1624  assert(NumIRArgs == 1);
1625  // indirect arguments are always on the stack, which is alloca addr space.
1626  llvm::Type *LTy = ConvertTypeForMem(it->type);
1627  ArgTypes[FirstIRArg] = LTy->getPointerTo(
1628  CGM.getDataLayout().getAllocaAddrSpace());
1629  break;
1630  }
1631 
1632  case ABIArgInfo::Extend:
1633  case ABIArgInfo::Direct: {
1634  // Fast-isel and the optimizer generally like scalar values better than
1635  // FCAs, so we flatten them if this is safe to do for this argument.
1636  llvm::Type *argType = ArgInfo.getCoerceToType();
1637  llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1638  if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1639  assert(NumIRArgs == st->getNumElements());
1640  for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1641  ArgTypes[FirstIRArg + i] = st->getElementType(i);
1642  } else {
1643  assert(NumIRArgs == 1);
1644  ArgTypes[FirstIRArg] = argType;
1645  }
1646  break;
1647  }
1648 
1650  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1651  for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1652  *ArgTypesIter++ = EltTy;
1653  }
1654  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1655  break;
1656  }
1657 
1658  case ABIArgInfo::Expand:
1659  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1660  getExpandedTypes(it->type, ArgTypesIter);
1661  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1662  break;
1663  }
1664  }
1665 
1666  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1667  assert(Erased && "Not in set?");
1668 
1669  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1670 }
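// (Illustrative sketch, not part of CGCall.cpp; assumes the x86-64 SysV ABI.)
// The flattening above means a small homogeneous struct such as
//
//   struct Point { double x, y; };
//   struct Point scale(struct Point p, double k);
//
// is typically coerced to { double, double } and flattened, yielding an IR
// signature along the lines of
//
//   { double, double } @scale(double, double, double)
//
// so each struct field travels as its own scalar IR argument.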
1671 
1672 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1673  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1674  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1675 
1676  if (!isFuncTypeConvertible(FPT))
1677  return llvm::StructType::get(getLLVMContext());
1678 
1679  const CGFunctionInfo *Info;
1680  if (isa<CXXDestructorDecl>(MD))
1681  Info =
1682  &arrangeCXXStructorDeclaration(MD, StructorType::Complete);
1683  else
1684  Info = &arrangeCXXMethodDeclaration(MD);
1685  return GetFunctionType(*Info);
1686 }
1687 
1688 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1689  llvm::AttrBuilder &FuncAttrs,
1690  const FunctionProtoType *FPT) {
1691  if (!FPT)
1692  return;
1693 
1694  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1695  FPT->isNothrow())
1696  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1697 }
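// (Illustrative note.) A prototype that carries a non-throwing exception
// specification, for example
//
//   void f() noexcept;
//
// is the case the helper above handles: the declaration receives the
// 'nounwind' IR attribute.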
1698 
1699 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1700  bool AttrOnCallSite,
1701  llvm::AttrBuilder &FuncAttrs) {
1702  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1703  if (!HasOptnone) {
1704  if (CodeGenOpts.OptimizeSize)
1705  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1706  if (CodeGenOpts.OptimizeSize == 2)
1707  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1708  }
1709 
1710  if (CodeGenOpts.DisableRedZone)
1711  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1712  if (CodeGenOpts.NoImplicitFloat)
1713  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1714 
1715  if (AttrOnCallSite) {
1716  // Attributes that should go on the call site only.
1717  if (!CodeGenOpts.SimplifyLibCalls ||
1718  CodeGenOpts.isNoBuiltinFunc(Name.data()))
1719  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1720  if (!CodeGenOpts.TrapFuncName.empty())
1721  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1722  } else {
1723  // Attributes that should go on the function, but not the call site.
1724  if (!CodeGenOpts.DisableFPElim) {
1725  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1726  } else if (CodeGenOpts.OmitLeafFramePointer) {
1727  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1728  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1729  } else {
1730  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1731  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1732  }
1733 
1734  FuncAttrs.addAttribute("less-precise-fpmad",
1735  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1736 
1737  if (CodeGenOpts.NullPointerIsValid)
1738  FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1739  if (!CodeGenOpts.FPDenormalMode.empty())
1740  FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1741 
1742  FuncAttrs.addAttribute("no-trapping-math",
1743  llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1744 
1745  // Strict (compliant) code is the default, so only add this attribute to
1746  // indicate that we are trying to work around a problem case.
1747  if (!CodeGenOpts.StrictFloatCastOverflow)
1748  FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1749 
1750  // TODO: Are these all needed?
1751  // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1752  FuncAttrs.addAttribute("no-infs-fp-math",
1753  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1754  FuncAttrs.addAttribute("no-nans-fp-math",
1755  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1756  FuncAttrs.addAttribute("unsafe-fp-math",
1757  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1758  FuncAttrs.addAttribute("use-soft-float",
1759  llvm::toStringRef(CodeGenOpts.SoftFloat));
1760  FuncAttrs.addAttribute("stack-protector-buffer-size",
1761  llvm::utostr(CodeGenOpts.SSPBufferSize));
1762  FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1763  llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1764  FuncAttrs.addAttribute(
1765  "correctly-rounded-divide-sqrt-fp-math",
1766  llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1767 
1768  if (getLangOpts().OpenCL)
1769  FuncAttrs.addAttribute("denorms-are-zero",
1770  llvm::toStringRef(CodeGenOpts.FlushDenorm));
1771 
1772  // TODO: Reciprocal estimate codegen options should apply to instructions?
1773  const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1774  if (!Recips.empty())
1775  FuncAttrs.addAttribute("reciprocal-estimates",
1776  llvm::join(Recips, ","));
1777 
1778  if (!CodeGenOpts.PreferVectorWidth.empty() &&
1779  CodeGenOpts.PreferVectorWidth != "none")
1780  FuncAttrs.addAttribute("prefer-vector-width",
1781  CodeGenOpts.PreferVectorWidth);
1782 
1783  if (CodeGenOpts.StackRealignment)
1784  FuncAttrs.addAttribute("stackrealign");
1785  if (CodeGenOpts.Backchain)
1786  FuncAttrs.addAttribute("backchain");
1787  }
1788 
1789  if (getLangOpts().assumeFunctionsAreConvergent()) {
1790  // Conservatively, mark all functions and calls in CUDA and OpenCL as
1791  // convergent (meaning, they may call an intrinsically convergent op, such
1792  // as __syncthreads() / barrier(), and so can't have certain optimizations
1793  // applied around them). LLVM will remove this attribute where it safely
1794  // can.
1795  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1796  }
1797 
1798  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1799  // Exceptions aren't supported in CUDA device code.
1800  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1801 
1802  // Respect -fcuda-flush-denormals-to-zero.
1803  if (CodeGenOpts.FlushDenorm)
1804  FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1805  }
1806 }
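// (Illustrative sketch; the exact set depends on the CodeGenOpts in effect.)
// A function compiled with -fno-omit-frame-pointer and the default 8-byte
// stack-protector buffer could end up with an IR attribute group roughly like
//
//   attributes #0 = { "no-frame-pointer-elim"="true"
//                     "no-trapping-math"="false"
//                     "stack-protector-buffer-size"="8" ... }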
1807 
1808 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1809  llvm::AttrBuilder FuncAttrs;
1810  ConstructDefaultFnAttrList(F.getName(),
1811  F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1812  /* AttrOnCallSite = */ false, FuncAttrs);
1813  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1814 }
1815 
1816 void CodeGenModule::ConstructAttributeList(
1817  StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1818  llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1819  llvm::AttrBuilder FuncAttrs;
1820  llvm::AttrBuilder RetAttrs;
1821 
1822  CallingConv = FI.getEffectiveCallingConvention();
1823  if (FI.isNoReturn())
1824  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1825 
1826  // If we have information about the function prototype, we can learn
1827  // attributes from there.
1828  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1829  CalleeInfo.getCalleeFunctionProtoType());
1830 
1831  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1832 
1833  bool HasOptnone = false;
1834  // FIXME: handle sseregparm someday...
1835  if (TargetDecl) {
1836  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1837  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1838  if (TargetDecl->hasAttr<NoThrowAttr>())
1839  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1840  if (TargetDecl->hasAttr<NoReturnAttr>())
1841  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1842  if (TargetDecl->hasAttr<ColdAttr>())
1843  FuncAttrs.addAttribute(llvm::Attribute::Cold);
1844  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1845  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1846  if (TargetDecl->hasAttr<ConvergentAttr>())
1847  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1848 
1849  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1850  AddAttributesFromFunctionProtoType(
1851  getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1852  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1853  // These attributes are not inherited by overloads.
1854  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1855  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1856  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1857  }
1858 
1859  // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1860  if (TargetDecl->hasAttr<ConstAttr>()) {
1861  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1862  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1863  } else if (TargetDecl->hasAttr<PureAttr>()) {
1864  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1865  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1866  } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1867  FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1868  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1869  }
1870  if (TargetDecl->hasAttr<RestrictAttr>())
1871  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1872  if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1873  !CodeGenOpts.NullPointerIsValid)
1874  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1875  if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1876  FuncAttrs.addAttribute("no_caller_saved_registers");
1877  if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1878  FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1879 
1880  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1881  if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1882  Optional<unsigned> NumElemsParam;
1883  if (AllocSize->getNumElemsParam().isValid())
1884  NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1885  FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1886  NumElemsParam);
1887  }
1888  }
1889 
1890  ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1891 
1892  if (CodeGenOpts.EnableSegmentedStacks &&
1893  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1894  FuncAttrs.addAttribute("split-stack");
1895 
1896  // Add NonLazyBind attribute to function declarations when -fno-plt
1897  // is used.
1898  if (TargetDecl && CodeGenOpts.NoPLT) {
1899  if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1900  if (!Fn->isDefined() && !AttrOnCallSite) {
1901  FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1902  }
1903  }
1904  }
1905 
1906  if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1907  if (getLangOpts().OpenCLVersion <= 120) {
1908  // In OpenCL v1.2, work groups are always uniform.
1909  FuncAttrs.addAttribute("uniform-work-group-size", "true");
1910  } else {
1911  // In OpenCL v2.0, work groups may or may not be uniform. The
1912  // '-cl-uniform-work-group-size' compile option is a hint to the
1913  // compiler that the global work-size is a multiple of the work-group
1914  // size specified to clEnqueueNDRangeKernel (i.e. that work groups
1915  // are uniform).
1916  FuncAttrs.addAttribute("uniform-work-group-size",
1917  llvm::toStringRef(CodeGenOpts.UniformWGSize));
1918  }
1919  }
1920 
1921  if (!AttrOnCallSite) {
1922  bool DisableTailCalls = false;
1923 
1924  if (CodeGenOpts.DisableTailCalls)
1925  DisableTailCalls = true;
1926  else if (TargetDecl) {
1927  if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1928  TargetDecl->hasAttr<AnyX86InterruptAttr>())
1929  DisableTailCalls = true;
1930  else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1931  if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1932  if (!BD->doesNotEscape())
1933  DisableTailCalls = true;
1934  }
1935  }
1936 
1937  FuncAttrs.addAttribute("disable-tail-calls",
1938  llvm::toStringRef(DisableTailCalls));
1939  GetCPUAndFeaturesAttributes(TargetDecl, FuncAttrs);
1940  }
1941 
1942  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1943 
1944  QualType RetTy = FI.getReturnType();
1945  const ABIArgInfo &RetAI = FI.getReturnInfo();
1946  switch (RetAI.getKind()) {
1947  case ABIArgInfo::Extend:
1948  if (RetAI.isSignExt())
1949  RetAttrs.addAttribute(llvm::Attribute::SExt);
1950  else
1951  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1952  LLVM_FALLTHROUGH;
1953  case ABIArgInfo::Direct:
1954  if (RetAI.getInReg())
1955  RetAttrs.addAttribute(llvm::Attribute::InReg);
1956  break;
1957  case ABIArgInfo::Ignore:
1958  break;
1959 
1960  case ABIArgInfo::InAlloca:
1961  case ABIArgInfo::Indirect: {
1962  // inalloca and sret disable readnone and readonly
1963  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1964  .removeAttribute(llvm::Attribute::ReadNone);
1965  break;
1966  }
1967 
1968  case ABIArgInfo::CoerceAndExpand:
1969  break;
1970 
1971  case ABIArgInfo::Expand:
1972  llvm_unreachable("Invalid ABI kind for return argument");
1973  }
1974 
1975  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1976  QualType PTy = RefTy->getPointeeType();
1977  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1978  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1979  .getQuantity());
1980  else if (getContext().getTargetAddressSpace(PTy) == 0 &&
1981  !CodeGenOpts.NullPointerIsValid)
1982  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1983  }
1984 
1985  bool hasUsedSRet = false;
1986  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
1987 
1988  // Attach attributes to sret.
1989  if (IRFunctionArgs.hasSRetArg()) {
1990  llvm::AttrBuilder SRETAttrs;
1991  if (!RetAI.getSuppressSRet())
1992  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1993  hasUsedSRet = true;
1994  if (RetAI.getInReg())
1995  SRETAttrs.addAttribute(llvm::Attribute::InReg);
1996  ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
1997  llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
1998  }
1999 
2000  // Attach attributes to inalloca argument.
2001  if (IRFunctionArgs.hasInallocaArg()) {
2002  llvm::AttrBuilder Attrs;
2003  Attrs.addAttribute(llvm::Attribute::InAlloca);
2004  ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2005  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2006  }
2007 
2008  unsigned ArgNo = 0;
2009  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2010  E = FI.arg_end();
2011  I != E; ++I, ++ArgNo) {
2012  QualType ParamType = I->type;
2013  const ABIArgInfo &AI = I->info;
2014  llvm::AttrBuilder Attrs;
2015 
2016  // Add attribute for padding argument, if necessary.
2017  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2018  if (AI.getPaddingInReg()) {
2019  ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2020  llvm::AttributeSet::get(
2021  getLLVMContext(),
2022  llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2023  }
2024  }
2025 
2026  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2027  // have the corresponding parameter variable. It doesn't make
2028  // sense to do it here because parameters are so messed up.
2029  switch (AI.getKind()) {
2030  case ABIArgInfo::Extend:
2031  if (AI.isSignExt())
2032  Attrs.addAttribute(llvm::Attribute::SExt);
2033  else
2034  Attrs.addAttribute(llvm::Attribute::ZExt);
2035  LLVM_FALLTHROUGH;
2036  case ABIArgInfo::Direct:
2037  if (ArgNo == 0 && FI.isChainCall())
2038  Attrs.addAttribute(llvm::Attribute::Nest);
2039  else if (AI.getInReg())
2040  Attrs.addAttribute(llvm::Attribute::InReg);
2041  break;
2042 
2043  case ABIArgInfo::Indirect: {
2044  if (AI.getInReg())
2045  Attrs.addAttribute(llvm::Attribute::InReg);
2046 
2047  if (AI.getIndirectByVal())
2048  Attrs.addAttribute(llvm::Attribute::ByVal);
2049 
2050  CharUnits Align = AI.getIndirectAlign();
2051 
2052  // In a byval argument, it is important that the required
2053  // alignment of the type is honored, as LLVM might be creating a
2054  // *new* stack object, and needs to know what alignment to give
2055  // it. (Sometimes it can deduce a sensible alignment on its own,
2056  // but not if clang decides it must emit a packed struct, or the
2057  // user specifies increased alignment requirements.)
2058  //
2059  // This is different from indirect *not* byval, where the object
2060  // exists already, and the align attribute is purely
2061  // informative.
2062  assert(!Align.isZero());
2063 
2064  // For now, only add this when we have a byval argument.
2065  // TODO: be less lazy about updating test cases.
2066  if (AI.getIndirectByVal())
2067  Attrs.addAlignmentAttr(Align.getQuantity());
2068 
2069  // byval disables readnone and readonly.
2070  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2071  .removeAttribute(llvm::Attribute::ReadNone);
2072  break;
2073  }
2074  case ABIArgInfo::Ignore:
2075  case ABIArgInfo::Expand:
2076  case ABIArgInfo::CoerceAndExpand:
2077  break;
2078 
2079  case ABIArgInfo::InAlloca:
2080  // inalloca disables readnone and readonly.
2081  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2082  .removeAttribute(llvm::Attribute::ReadNone);
2083  continue;
2084  }
2085 
2086  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2087  QualType PTy = RefTy->getPointeeType();
2088  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2089  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2090  .getQuantity());
2091  else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2092  !CodeGenOpts.NullPointerIsValid)
2093  Attrs.addAttribute(llvm::Attribute::NonNull);
2094  }
2095 
2096  switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2097  case ParameterABI::Ordinary:
2098  break;
2099 
2100  case ParameterABI::SwiftIndirectResult: {
2101  // Add 'sret' if we haven't already used it for something, but
2102  // only if the result is void.
2103  if (!hasUsedSRet && RetTy->isVoidType()) {
2104  Attrs.addAttribute(llvm::Attribute::StructRet);
2105  hasUsedSRet = true;
2106  }
2107 
2108  // Add 'noalias' in either case.
2109  Attrs.addAttribute(llvm::Attribute::NoAlias);
2110 
2111  // Add 'dereferenceable' and 'alignment'.
2112  auto PTy = ParamType->getPointeeType();
2113  if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2114  auto info = getContext().getTypeInfoInChars(PTy);
2115  Attrs.addDereferenceableAttr(info.first.getQuantity());
2116  Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2117  info.second.getQuantity()));
2118  }
2119  break;
2120  }
2121 
2122  case ParameterABI::SwiftErrorResult:
2123  Attrs.addAttribute(llvm::Attribute::SwiftError);
2124  break;
2125 
2126  case ParameterABI::SwiftContext:
2127  Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2128  break;
2129  }
2130 
2131  if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2132  Attrs.addAttribute(llvm::Attribute::NoCapture);
2133 
2134  if (Attrs.hasAttributes()) {
2135  unsigned FirstIRArg, NumIRArgs;
2136  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2137  for (unsigned i = 0; i < NumIRArgs; i++)
2138  ArgAttrs[FirstIRArg + i] =
2139  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2140  }
2141  }
2142  assert(ArgNo == FI.arg_size());
2143 
2144  AttrList = llvm::AttributeList::get(
2145  getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2146  llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2147 }
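// (Illustrative sketch, assuming a target that extends small integer
// arguments, as x86-64 does.) The Extend handling above turns
//
//   short half(short);
//
// into IR along the lines of
//
//   declare signext i16 @half(i16 signext)
//
// with 'signext' for signed types and 'zeroext' for unsigned ones.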
2148 
2149 /// An argument came in as a promoted argument; demote it back to its
2150 /// declared type.
2151 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2152  const VarDecl *var,
2153  llvm::Value *value) {
2154  llvm::Type *varType = CGF.ConvertType(var->getType());
2155 
2156  // This can happen with promotions that actually don't change the
2157  // underlying type, like the enum promotions.
2158  if (value->getType() == varType) return value;
2159 
2160  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2161  && "unexpected promotion type");
2162 
2163  if (isa<llvm::IntegerType>(varType))
2164  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2165 
2166  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2167 }
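// (Context sketch.) The demotion above arises from K&R-style definitions:
//
//   void f(c) char c; { ... }
//
// The caller applies the default argument promotions and passes an 'int',
// so the prolog truncates the incoming i32 back to i8 ("arg.unpromote");
// a promoted 'float' is likewise narrowed back from 'double' by an FP cast.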
2168 
2169 /// Returns the attribute (either a parameter attribute or a function
2170 /// attribute) that declares argument ArgNo to be non-null.
2171 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2172  QualType ArgType, unsigned ArgNo) {
2173  // FIXME: __attribute__((nonnull)) can also be applied to:
2174  // - references to pointers, where the pointee is known to be
2175  // nonnull (apparently a Clang extension)
2176  // - transparent unions containing pointers
2177  // In the former case, LLVM IR cannot represent the constraint. In
2178  // the latter case, we have no guarantee that the transparent union
2179  // is in fact passed as a pointer.
2180  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2181  return nullptr;
2182  // First, check attribute on parameter itself.
2183  if (PVD) {
2184  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2185  return ParmNNAttr;
2186  }
2187  // Check function attributes.
2188  if (!FD)
2189  return nullptr;
2190  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2191  if (NNAttr->isNonNull(ArgNo))
2192  return NNAttr;
2193  }
2194  return nullptr;
2195 }
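// (Illustrative note.) The two spellings this helper searches for are:
//
//   void use(void *p) __attribute__((nonnull(1)));  // function attribute
//   void use2(void *p __attribute__((nonnull)));    // parameter attribute
//
// Either form lets the prolog code below mark the IR argument 'nonnull'
// (unless null pointers are treated as valid in this configuration).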
2196 
2197 namespace {
2198  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2199  Address Temp;
2200  Address Arg;
2201  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2202  void Emit(CodeGenFunction &CGF, Flags flags) override {
2203  llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2204  CGF.Builder.CreateStore(errorValue, Arg);
2205  }
2206  };
2207 }
2208 
2209 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2210  llvm::Function *Fn,
2211  const FunctionArgList &Args) {
2212  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2213  // Naked functions don't have prologues.
2214  return;
2215 
2216  // If this is an implicit-return-zero function, go ahead and
2217  // initialize the return value. TODO: it might be nice to have
2218  // a more general mechanism for this that didn't require synthesized
2219  // return statements.
2220  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2221  if (FD->hasImplicitReturnZero()) {
2222  QualType RetTy = FD->getReturnType().getUnqualifiedType();
2223  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2224  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2225  Builder.CreateStore(Zero, ReturnValue);
2226  }
2227  }
2228 
2229  // FIXME: We no longer need the types from FunctionArgList; lift up and
2230  // simplify.
2231 
2232  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2233  // Flattened function arguments.
2234  SmallVector<llvm::Value *, 16> FnArgs;
2235  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2236  for (auto &Arg : Fn->args()) {
2237  FnArgs.push_back(&Arg);
2238  }
2239  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2240 
2241  // If we're using inalloca, all the memory arguments are GEPs off of the last
2242  // parameter, which is a pointer to the complete memory area.
2243  Address ArgStruct = Address::invalid();
2244  const llvm::StructLayout *ArgStructLayout = nullptr;
2245  if (IRFunctionArgs.hasInallocaArg()) {
2246  ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2247  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2248  FI.getArgStructAlignment());
2249 
2250  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2251  }
2252 
2253  // Name the struct return parameter.
2254  if (IRFunctionArgs.hasSRetArg()) {
2255  auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2256  AI->setName("agg.result");
2257  AI->addAttr(llvm::Attribute::NoAlias);
2258  }
2259 
2260  // Track if we received the parameter as a pointer (indirect, byval, or
2261  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2262  // into a local alloca for us.
2263  SmallVector<ParamValue, 16> ArgVals;
2264  ArgVals.reserve(Args.size());
2265 
2266  // Create a pointer value for every parameter declaration. This usually
2267  // entails copying one or more LLVM IR arguments into an alloca. Don't push
2268  // any cleanups or do anything that might unwind. We do that separately, so
2269  // we can push the cleanups in the correct order for the ABI.
2270  assert(FI.arg_size() == Args.size() &&
2271  "Mismatch between function signature & arguments.");
2272  unsigned ArgNo = 0;
2273  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2274  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2275  i != e; ++i, ++info_it, ++ArgNo) {
2276  const VarDecl *Arg = *i;
2277  const ABIArgInfo &ArgI = info_it->info;
2278 
2279  bool isPromoted =
2280  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2281  // We are converting from ABIArgInfo type to VarDecl type directly, unless
2282  // the parameter is promoted. In this case we convert to
2283  // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2284  QualType Ty = isPromoted ? info_it->type : Arg->getType();
2285  assert(hasScalarEvaluationKind(Ty) ==
2286  hasScalarEvaluationKind(Arg->getType()));
2287 
2288  unsigned FirstIRArg, NumIRArgs;
2289  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2290 
2291  switch (ArgI.getKind()) {
2292  case ABIArgInfo::InAlloca: {
2293  assert(NumIRArgs == 0);
2294  auto FieldIndex = ArgI.getInAllocaFieldIndex();
2295  CharUnits FieldOffset =
2296  CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2297  Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2298  Arg->getName());
2299  ArgVals.push_back(ParamValue::forIndirect(V));
2300  break;
2301  }
2302 
2303  case ABIArgInfo::Indirect: {
2304  assert(NumIRArgs == 1);
2305  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2306 
2307  if (!hasScalarEvaluationKind(Ty)) {
2308  // Aggregates and complex variables are accessed by reference. All we
2309  // need to do is realign the value, if requested.
2310  Address V = ParamAddr;
2311  if (ArgI.getIndirectRealign()) {
2312  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2313 
2314  // Copy from the incoming argument pointer to the temporary with the
2315  // appropriate alignment.
2316  //
2317  // FIXME: We should have a common utility for generating an aggregate
2318  // copy.
2319  CharUnits Size = getContext().getTypeSizeInChars(Ty);
2320  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2321  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2322  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2323  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2324  V = AlignedTemp;
2325  }
2326  ArgVals.push_back(ParamValue::forIndirect(V));
2327  } else {
2328  // Load scalar value from indirect argument.
2329  llvm::Value *V =
2330  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2331 
2332  if (isPromoted)
2333  V = emitArgumentDemotion(*this, Arg, V);
2334  ArgVals.push_back(ParamValue::forDirect(V));
2335  }
2336  break;
2337  }
2338 
2339  case ABIArgInfo::Extend:
2340  case ABIArgInfo::Direct: {
2341 
2342  // If we have the trivial case, handle it with no muss and fuss.
2343  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2344  ArgI.getCoerceToType() == ConvertType(Ty) &&
2345  ArgI.getDirectOffset() == 0) {
2346  assert(NumIRArgs == 1);
2347  llvm::Value *V = FnArgs[FirstIRArg];
2348  auto AI = cast<llvm::Argument>(V);
2349 
2350  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2351  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2352  PVD->getFunctionScopeIndex()) &&
2353  !CGM.getCodeGenOpts().NullPointerIsValid)
2354  AI->addAttr(llvm::Attribute::NonNull);
2355 
2356  QualType OTy = PVD->getOriginalType();
2357  if (const auto *ArrTy =
2358  getContext().getAsConstantArrayType(OTy)) {
2359  // A C99 array parameter declaration with the static keyword also
2360  // indicates dereferenceability, and if the size is constant we can
2361  // use the dereferenceable attribute (which requires the size in
2362  // bytes).
2363  if (ArrTy->getSizeModifier() == ArrayType::Static) {
2364  QualType ETy = ArrTy->getElementType();
2365  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2366  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2367  ArrSize) {
2368  llvm::AttrBuilder Attrs;
2369  Attrs.addDereferenceableAttr(
2370  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2371  AI->addAttrs(Attrs);
2372  } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2373  !CGM.getCodeGenOpts().NullPointerIsValid) {
2374  AI->addAttr(llvm::Attribute::NonNull);
2375  }
2376  }
2377  } else if (const auto *ArrTy =
2378  getContext().getAsVariableArrayType(OTy)) {
2379  // For C99 VLAs with the static keyword, we don't know the size so
2380  // we can't use the dereferenceable attribute, but in addrspace(0)
2381  // we know that it must be nonnull.
2382  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2383  !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2384  !CGM.getCodeGenOpts().NullPointerIsValid)
2385  AI->addAttr(llvm::Attribute::NonNull);
2386  }
2387 
2388  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2389  if (!AVAttr)
2390  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2391  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2392  if (AVAttr) {
2393  llvm::Value *AlignmentValue =
2394  EmitScalarExpr(AVAttr->getAlignment());
2395  llvm::ConstantInt *AlignmentCI =
2396  cast<llvm::ConstantInt>(AlignmentValue);
2397  unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2398  +llvm::Value::MaximumAlignment);
2399  AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2400  }
2401  }
2402 
2403  if (Arg->getType().isRestrictQualified())
2404  AI->addAttr(llvm::Attribute::NoAlias);
2405 
2406  // LLVM expects swifterror parameters to be used in very restricted
2407  // ways. Copy the value into a less-restricted temporary.
2408  if (FI.getExtParameterInfo(ArgNo).getABI()
2409  == ParameterABI::SwiftErrorResult) {
2410  QualType pointeeTy = Ty->getPointeeType();
2411  assert(pointeeTy->isPointerType());
2412  Address temp =
2413  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2414  Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2415  llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2416  Builder.CreateStore(incomingErrorValue, temp);
2417  V = temp.getPointer();
2418 
2419  // Push a cleanup to copy the value back at the end of the function.
2420  // The convention does not guarantee that the value will be written
2421  // back if the function exits with an unwind exception.
2422  EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2423  }
2424 
2425  // Ensure the argument is the correct type.
2426  if (V->getType() != ArgI.getCoerceToType())
2427  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2428 
2429  if (isPromoted)
2430  V = emitArgumentDemotion(*this, Arg, V);
2431 
2432  // Because of merging of function types from multiple decls it is
2433  // possible for the type of an argument to not match the corresponding
2434  // type in the function type. Since we are codegening the callee
2435  // in here, add a cast to the argument type.
2436  llvm::Type *LTy = ConvertType(Arg->getType());
2437  if (V->getType() != LTy)
2438  V = Builder.CreateBitCast(V, LTy);
2439 
2440  ArgVals.push_back(ParamValue::forDirect(V));
2441  break;
2442  }
2443 
2444  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2445  Arg->getName());
2446 
2447  // Pointer to store into.
2448  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2449 
2450  // Fast-isel and the optimizer generally like scalar values better than
2451  // FCAs, so we flatten them if this is safe to do for this argument.
2452  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2453  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2454  STy->getNumElements() > 1) {
2455  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2456  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2457  llvm::Type *DstTy = Ptr.getElementType();
2458  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2459 
2460  Address AddrToStoreInto = Address::invalid();
2461  if (SrcSize <= DstSize) {
2462  AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2463  } else {
2464  AddrToStoreInto =
2465  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2466  }
2467 
2468  assert(STy->getNumElements() == NumIRArgs);
2469  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2470  auto AI = FnArgs[FirstIRArg + i];
2471  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2472  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2473  Address EltPtr =
2474  Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2475  Builder.CreateStore(AI, EltPtr);
2476  }
2477 
2478  if (SrcSize > DstSize) {
2479  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2480  }
2481 
2482  } else {
2483  // Simple case, just do a coerced store of the argument into the alloca.
2484  assert(NumIRArgs == 1);
2485  auto AI = FnArgs[FirstIRArg];
2486  AI->setName(Arg->getName() + ".coerce");
2487  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2488  }
2489 
2490  // Match to what EmitParmDecl is expecting for this type.
2491  if (hasScalarEvaluationKind(Ty)) {
2492  llvm::Value *V =
2493  EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2494  if (isPromoted)
2495  V = emitArgumentDemotion(*this, Arg, V);
2496  ArgVals.push_back(ParamValue::forDirect(V));
2497  } else {
2498  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2499  }
2500  break;
2501  }
2502 
2503  case ABIArgInfo::CoerceAndExpand: {
2504  // Reconstruct into a temporary.
2505  Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2506  ArgVals.push_back(ParamValue::forIndirect(alloca));
2507 
2508  auto coercionType = ArgI.getCoerceAndExpandType();
2509  alloca = Builder.CreateElementBitCast(alloca, coercionType);
2510  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2511 
2512  unsigned argIndex = FirstIRArg;
2513  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2514  llvm::Type *eltType = coercionType->getElementType(i);
2515  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2516  continue;
2517 
2518  auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2519  auto elt = FnArgs[argIndex++];
2520  Builder.CreateStore(elt, eltAddr);
2521  }
2522  assert(argIndex == FirstIRArg + NumIRArgs);
2523  break;
2524  }
2525 
2526  case ABIArgInfo::Expand: {
2527  // If this structure was expanded into multiple arguments then
2528  // we need to create a temporary and reconstruct it from the
2529  // arguments.
2530  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2531  LValue LV = MakeAddrLValue(Alloca, Ty);
2532  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2533 
2534  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2535  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2536  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2537  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2538  auto AI = FnArgs[FirstIRArg + i];
2539  AI->setName(Arg->getName() + "." + Twine(i));
2540  }
2541  break;
2542  }
2543 
2544  case ABIArgInfo::Ignore:
2545  assert(NumIRArgs == 0);
2546  // Initialize the local variable appropriately.
2547  if (!hasScalarEvaluationKind(Ty)) {
2548  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2549  } else {
2550  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2551  ArgVals.push_back(ParamValue::forDirect(U));
2552  }
2553  break;
2554  }
2555  }
2556 
2557  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2558  for (int I = Args.size() - 1; I >= 0; --I)
2559  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2560  } else {
2561  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2562  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2563  }
2564 }
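// (Worked example for the C99 'static' array parameter case handled above.)
//
//   void sum4(int a[static 4]);
//
// promises at least four reachable ints, so with 4-byte ints the argument
// can be marked 'dereferenceable(16)'; for a VLA form such as
// 'int a[static n]' the size is unknown, so only 'nonnull' is added
// (in address space 0).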
2565 
2566 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2567  while (insn->use_empty()) {
2568  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2569  if (!bitcast) return;
2570 
2571  // This is "safe" because we would have used a ConstantExpr otherwise.
2572  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2573  bitcast->eraseFromParent();
2574  }
2575 }
2576 
2577 /// Try to emit a fused autorelease of a return result.
2578 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2579  llvm::Value *result) {
2580  // The result must be the last instruction in the current block.
2581  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2582  if (BB->empty()) return nullptr;
2583  if (&BB->back() != result) return nullptr;
2584 
2585  llvm::Type *resultType = result->getType();
2586 
2587  // result is in a BasicBlock and is therefore an Instruction.
2588  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2589 
2590  SmallVector<llvm::Instruction *, 4> InstsToKill;
2591 
2592  // Look for:
2593  // %generator = bitcast %type1* %generator2 to %type2*
2594  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2595  // We would have emitted this as a constant if the operand weren't
2596  // an Instruction.
2597  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2598 
2599  // Require the generator to be immediately followed by the cast.
2600  if (generator->getNextNode() != bitcast)
2601  return nullptr;
2602 
2603  InstsToKill.push_back(bitcast);
2604  }
2605 
2606  // Look for:
2607  // %generator = call i8* @objc_retain(i8* %originalResult)
2608  // or
2609  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2610  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2611  if (!call) return nullptr;
2612 
2613  bool doRetainAutorelease;
2614 
2615  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2616  doRetainAutorelease = true;
2617  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2618  .objc_retainAutoreleasedReturnValue) {
2619  doRetainAutorelease = false;
2620 
2621  // If we emitted an assembly marker for this call (and the
2622  // ARCEntrypoints field should have been set if so), go looking
2623  // for that call. If we can't find it, we can't do this
2624  // optimization. But it should always be the immediately previous
2625  // instruction, unless we needed bitcasts around the call.
2626  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2627  llvm::Instruction *prev = call->getPrevNode();
2628  assert(prev);
2629  if (isa<llvm::BitCastInst>(prev)) {
2630  prev = prev->getPrevNode();
2631  assert(prev);
2632  }
2633  assert(isa<llvm::CallInst>(prev));
2634  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2635  CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2636  InstsToKill.push_back(prev);
2637  }
2638  } else {
2639  return nullptr;
2640  }
2641 
2642  result = call->getArgOperand(0);
2643  InstsToKill.push_back(call);
2644 
2645  // Keep killing bitcasts, for sanity. Note that we no longer care
2646  // about precise ordering as long as there's exactly one use.
2647  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2648  if (!bitcast->hasOneUse()) break;
2649  InstsToKill.push_back(bitcast);
2650  result = bitcast->getOperand(0);
2651  }
2652 
2653  // Delete all the unnecessary instructions, from latest to earliest.
2654  for (auto *I : InstsToKill)
2655  I->eraseFromParent();
2656 
2657  // Do the fused retain/autorelease if we were asked to.
2658  if (doRetainAutorelease)
2659  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2660 
2661  // Cast back to the result type.
2662  return CGF.Builder.CreateBitCast(result, resultType);
2663 }
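// (Schematic IR, for orientation only.) The shapes matched above are
//
//   %r = call i8* @objc_retain(i8* %v)
//   ret i8* %r                          ; fused into retainAutorelease
//
//   %r = call i8* @objc_retainAutoreleasedReturnValue(i8* %v)
//   ret i8* %r                          ; retain cancels the autorelease
//
// modulo intervening bitcasts, which are collected and erased along the way.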
2664 
2665 /// If this is a +1 of the value of an immutable 'self', remove it.
2666 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2667  llvm::Value *result) {
2668  // This is only applicable to a method with an immutable 'self'.
2669  const ObjCMethodDecl *method =
2670  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2671  if (!method) return nullptr;
2672  const VarDecl *self = method->getSelfDecl();
2673  if (!self->getType().isConstQualified()) return nullptr;
2674 
2675  // Look for a retain call.
2676  llvm::CallInst *retainCall =
2677  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2678  if (!retainCall ||
2679  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2680  return nullptr;
2681 
2682  // Look for an ordinary load of 'self'.
2683  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2684  llvm::LoadInst *load =
2685  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2686  if (!load || load->isAtomic() || load->isVolatile() ||
2687  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2688  return nullptr;
2689 
2690  // Okay! Burn it all down. This relies for correctness on the
2691  // assumption that the retain is emitted as part of the return and
2692  // that thereafter everything is used "linearly".
2693  llvm::Type *resultType = result->getType();
2694  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2695  assert(retainCall->use_empty());
2696  retainCall->eraseFromParent();
2697  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2698 
2699  return CGF.Builder.CreateBitCast(load, resultType);
2700 }
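// (Context sketch.) The pattern removed here corresponds to source such as
//
//   - (id)initWithFoo:(Foo *)foo { ... return self; }
//
// where 'self' is const: the retain emitted for returning 'self' is deleted
// under the linearity assumption documented above.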
2701 
2702 /// Emit an ARC autorelease of the result of a function.
2703 ///
2704 /// \return the value to actually return from the function
2705 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2706  llvm::Value *result) {
2707  // If we're returning 'self', kill the initial retain. This is a
2708  // heuristic attempt to "encourage correctness" in the really unfortunate
2709  // case where we have a return of self during a dealloc and we desperately
2710  // need to avoid the possible autorelease.
2711  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2712  return self;
2713 
2714  // At -O0, try to emit a fused retain/autorelease.
2715  if (CGF.shouldUseFusedARCCalls())
2716  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2717  return fused;
2718 
2719  return CGF.EmitARCAutoreleaseReturnValue(result);
2720 }
2721 
2722 /// Heuristically search for a dominating store to the return-value slot.
2723 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2724  // Check whether a User is a store whose pointer operand is the ReturnValue.
2725  // We are looking for stores to the ReturnValue, not for stores of the
2726  // ReturnValue to some other location.
2727  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2728  auto *SI = dyn_cast<llvm::StoreInst>(U);
2729  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2730  return nullptr;
2731  // These aren't actually possible for non-coerced returns, and we
2732  // only care about non-coerced returns on this code path.
2733  assert(!SI->isAtomic() && !SI->isVolatile());
2734  return SI;
2735  };
2736  // If there are multiple uses of the return-value slot, just check
2737  // for something immediately preceding the IP. Sometimes this can
2738  // happen with how we generate implicit-returns; it can also happen
2739  // with noreturn cleanups.
2740  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2741  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2742  if (IP->empty()) return nullptr;
2743  llvm::Instruction *I = &IP->back();
2744 
2745  // Skip lifetime markers
2746  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2747  IE = IP->rend();
2748  II != IE; ++II) {
2749  if (llvm::IntrinsicInst *Intrinsic =
2750  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2751  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2752  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2753  ++II;
2754  if (II == IE)
2755  break;
2756  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2757  continue;
2758  }
2759  }
2760  I = &*II;
2761  break;
2762  }
2763 
2764  return GetStoreIfValid(I);
2765  }
2766 
2767  llvm::StoreInst *store =
2768  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2769  if (!store) return nullptr;
2770 
2771  // Now do a quick-and-dirty dominance check: just walk up the
2772  // single-predecessors chain from the current insertion point.
2773  llvm::BasicBlock *StoreBB = store->getParent();
2774  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2775  while (IP != StoreBB) {
2776  if (!(IP = IP->getSinglePredecessor()))
2777  return nullptr;
2778  }
2779 
2780  // Okay, the store's basic block dominates the insertion point; we
2781  // can do our thing.
2782  return store;
2783 }
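// (Schematic IR, for orientation only.) The shape being recognized is the
// common epilogue pattern
//
//   store i32 %x, i32* %retval
//   ; ... a chain of single-predecessor blocks ...
//   ; insertion point, about to emit the 'ret'
//
// in which case the caller can elide the load of %retval, return %x
// directly, and usually delete the alloca as well.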
2784 
2785 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2786  bool EmitRetDbgLoc,
2787  SourceLocation EndLoc) {
2788  if (FI.isNoReturn()) {
2789  // Noreturn functions don't return.
2790  EmitUnreachable(EndLoc);
2791  return;
2792  }
2793 
2794  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2795  // Naked functions don't have epilogues.
2796  Builder.CreateUnreachable();
2797  return;
2798  }
2799 
2800  // Functions with no result always return void.
2801  if (!ReturnValue.isValid()) {
2802  Builder.CreateRetVoid();
2803  return;
2804  }
2805 
2806  llvm::DebugLoc RetDbgLoc;
2807  llvm::Value *RV = nullptr;
2808  QualType RetTy = FI.getReturnType();
2809  const ABIArgInfo &RetAI = FI.getReturnInfo();
2810 
2811  switch (RetAI.getKind()) {
2812  case ABIArgInfo::InAlloca:
2813  // Aggregates get evaluated directly into the destination. Sometimes we
2814  // need to return the sret value in a register, though.
2815  assert(hasAggregateEvaluationKind(RetTy));
2816  if (RetAI.getInAllocaSRet()) {
2817  llvm::Function::arg_iterator EI = CurFn->arg_end();
2818  --EI;
2819  llvm::Value *ArgStruct = &*EI;
2820  llvm::Value *SRet = Builder.CreateStructGEP(
2821  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2822  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2823  }
2824  break;
2825 
2826  case ABIArgInfo::Indirect: {
2827  auto AI = CurFn->arg_begin();
2828  if (RetAI.isSRetAfterThis())
2829  ++AI;
2830  switch (getEvaluationKind(RetTy)) {
2831  case TEK_Complex: {
2832  ComplexPairTy RT =
2833  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2834  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2835  /*isInit*/ true);
2836  break;
2837  }
2838  case TEK_Aggregate:
2839  // Do nothing; aggregates get evaluated directly into the destination.
2840  break;
2841  case TEK_Scalar:
2842  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2843  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2844  /*isInit*/ true);
2845  break;
2846  }
2847  break;
2848  }
2849 
2850  case ABIArgInfo::Extend:
2851  case ABIArgInfo::Direct:
2852  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2853  RetAI.getDirectOffset() == 0) {
2854  // The internal return value temp always will have pointer-to-return-type
2855  // type, just do a load.
2856 
2857  // If there is a dominating store to ReturnValue, we can elide
2858  // the load, zap the store, and usually zap the alloca.
2859  if (llvm::StoreInst *SI =
2860  findDominatingStoreToReturnValue(*this)) {
2861  // Reuse the debug location from the store unless there is
2862  // cleanup code to be emitted between the store and return
2863  // instruction.
2864  if (EmitRetDbgLoc && !AutoreleaseResult)
2865  RetDbgLoc = SI->getDebugLoc();
2866  // Get the stored value and nuke the now-dead store.
2867  RV = SI->getValueOperand();
2868  SI->eraseFromParent();
2869 
2870  // If that was the only use of the return value, nuke it as well now.
2871  auto returnValueInst = ReturnValue.getPointer();
2872  if (returnValueInst->use_empty()) {
2873  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2874  alloca->eraseFromParent();
2875  ReturnValue = Address::invalid();
2876  }
2877  }
2878 
2879  // Otherwise, we have to do a simple load.
2880  } else {
2881  RV = Builder.CreateLoad(ReturnValue);
2882  }
2883  } else {
2884  // If the value is offset in memory, apply the offset now.
2885  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2886 
2887  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2888  }
2889 
2890  // In ARC, end functions that return a retainable type with a call
2891  // to objc_autoreleaseReturnValue.
2892  if (AutoreleaseResult) {
2893 #ifndef NDEBUG
2894  // Type::isObjCRetainableType has to be called on a QualType that hasn't
2895  // been stripped of the typedefs, so we cannot use RetTy here. Get the
2896  // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2897  // CurCodeDecl or BlockInfo.
2898  QualType RT;
2899 
2900  if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2901  RT = FD->getReturnType();
2902  else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2903  RT = MD->getReturnType();
2904  else if (isa<BlockDecl>(CurCodeDecl))
2905  RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2906  else
2907  llvm_unreachable("Unexpected function/method type");
2908 
2909  assert(getLangOpts().ObjCAutoRefCount &&
2910  !FI.isReturnsRetained() &&
2911  RT->isObjCRetainableType());
2912 #endif
2913  RV = emitAutoreleaseOfResult(*this, RV);
2914  }
2915 
2916  break;
2917 
2918  case ABIArgInfo::Ignore:
2919  break;
2920 
2921  case ABIArgInfo::CoerceAndExpand: {
2922  auto coercionType = RetAI.getCoerceAndExpandType();
2923  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2924 
2925  // Load all of the coerced elements out into results.
2926  SmallVector<llvm::Value*, 4> results;
2927  Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2928  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2929  auto coercedEltType = coercionType->getElementType(i);
2930  if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2931  continue;
2932 
2933  auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2934  auto elt = Builder.CreateLoad(eltAddr);
2935  results.push_back(elt);
2936  }
2937 
2938  // If we have one result, it's the single direct result type.
2939  if (results.size() == 1) {
2940  RV = results[0];
2941 
2942  // Otherwise, we need to make a first-class aggregate.
2943  } else {
2944  // Construct a return type that lacks padding elements.
2945  llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2946 
2947  RV = llvm::UndefValue::get(returnType);
2948  for (unsigned i = 0, e = results.size(); i != e; ++i) {
2949  RV = Builder.CreateInsertValue(RV, results[i], i);
2950  }
2951  }
2952  break;
2953  }
2954 
2955  case ABIArgInfo::Expand:
2956  llvm_unreachable("Invalid ABI kind for return argument");
2957  }
2958 
2959  llvm::Instruction *Ret;
2960  if (RV) {
2961  EmitReturnValueCheck(RV);
2962  Ret = Builder.CreateRet(RV);
2963  } else {
2964  Ret = Builder.CreateRetVoid();
2965  }
2966 
2967  if (RetDbgLoc)
2968  Ret->setDebugLoc(std::move(RetDbgLoc));
2969 }
2970 
2971 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2972  // A current decl may not be available when emitting vtable thunks.
2973  if (!CurCodeDecl)
2974  return;
2975 
2976  ReturnsNonNullAttr *RetNNAttr = nullptr;
2977  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2978  RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2979 
2980  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2981  return;
2982 
2983  // Prefer the returns_nonnull attribute if it's present.
2984  SourceLocation AttrLoc;
2985  SanitizerMask CheckKind;
2986  SanitizerHandler Handler;
2987  if (RetNNAttr) {
2988  assert(!requiresReturnValueNullabilityCheck() &&
2989  "Cannot check nullability and the nonnull attribute");
2990  AttrLoc = RetNNAttr->getLocation();
2991  CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2992  Handler = SanitizerHandler::NonnullReturn;
2993  } else {
2994  if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
2995  if (auto *TSI = DD->getTypeSourceInfo())
2996  if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
2997  AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
2998  CheckKind = SanitizerKind::NullabilityReturn;
2999  Handler = SanitizerHandler::NullabilityReturn;
3000  }
3001 
3002  SanitizerScope SanScope(this);
3003 
3004  // Make sure the "return" source location is valid. If we're checking a
3005  // nullability annotation, make sure the preconditions for the check are met.
3006  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3007  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3008  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3009  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3010  if (requiresReturnValueNullabilityCheck())
3011  CanNullCheck =
3012  Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3013  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3014  EmitBlock(Check);
3015 
3016  // Now do the null check.
3017  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3018  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3019  llvm::Value *DynamicData[] = {SLocPtr};
3020  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3021 
3022  EmitBlock(NoCheck);
3023 
3024 #ifndef NDEBUG
3025  // The return location should not be used after the check has been emitted.
3026  ReturnLocation = Address::invalid();
3027 #endif
3028 }
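// (Illustrative note; 'dup_name' is a made-up declaration.) Under
// -fsanitize=returns-nonnull-attribute, a function such as
//
//   __attribute__((returns_nonnull)) char *dup_name(const char *s);
//
// gets the guarded null check emitted above on its return value, invoking
// the nonnull-return diagnostic handler if the check fails.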
3029 
3030 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3031  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3032  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3033 }
3034 
3035 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3036  QualType Ty) {
3037  // FIXME: Generate IR in one pass, rather than going back and fixing up these
3038  // placeholders.
3039  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3040  llvm::Type *IRPtrTy = IRTy->getPointerTo();
3041  llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3042 
3043  // FIXME: When we generate this IR in one pass, we shouldn't need
3044  // this win32-specific alignment hack.
3045  CharUnits Align = CharUnits::fromQuantity(4);
3046  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3047 
3048  return AggValueSlot::forAddr(Address(Placeholder, Align),
3049  Ty.getQualifiers(),
3050  AggValueSlot::IsNotDestructed,
3051  AggValueSlot::DoesNotNeedGCBarriers,
3052  AggValueSlot::IsNotAliased,
3053  AggValueSlot::DoesNotOverlap);
3054 }
3055 
3056 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3057  const VarDecl *param,
3058  SourceLocation loc) {
3059  // StartFunction converted the ABI-lowered parameter(s) into a
3060  // local alloca. We need to turn that into an r-value suitable
3061  // for EmitCall.
3062  Address local = GetAddrOfLocalVar(param);
3063 
3064  QualType type = param->getType();
3065 
3066  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3067  "cannot emit delegate call arguments for inalloca arguments!");
3068 
3069  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3070  // but the argument needs to be the original pointer.
3071  if (type->isReferenceType()) {
3072  args.add(RValue::get(Builder.CreateLoad(local)), type);
3073 
3074  // In ARC, move out of consumed arguments so that the release cleanup
3075  // entered by StartFunction doesn't cause an over-release. This isn't
3076  // optimal -O0 code generation, but it should get cleaned up when
3077  // optimization is enabled. This also assumes that delegate calls are
3078  // performed exactly once for a set of arguments, but that should be safe.
3079  } else if (getLangOpts().ObjCAutoRefCount &&
3080  param->hasAttr<NSConsumedAttr>() &&
3081  type->isObjCRetainableType()) {
3082  llvm::Value *ptr = Builder.CreateLoad(local);
3083  auto null =
3084  llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3085  Builder.CreateStore(null, local);
3086  args.add(RValue::get(ptr), type);
3087 
3088  // For the most part, we just need to load the alloca, except that
3089  // aggregate r-values are actually pointers to temporaries.
3090  } else {
3091  args.add(convertTempToRValue(local, type, loc), type);
3092  }
3093 
3094  // Deactivate the cleanup for the callee-destructed param that was pushed.
3095  if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3096  type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3097  type.isDestructedType()) {
3098  EHScopeStack::stable_iterator cleanup =
3099  CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3100  assert(cleanup.isValid() &&
3101  "cleanup for callee-destructed param not recorded");
3102  // This unreachable is a temporary marker which will be removed later.
3103  llvm::Instruction *isActive = Builder.CreateUnreachable();
3104  args.addArgCleanupDeactivation(cleanup, isActive);
3105  }
3106 }
3107 
3108 static bool isProvablyNull(llvm::Value *addr) {
3109  return isa<llvm::ConstantPointerNull>(addr);
3110 }
3111 
3112 /// Emit the actual writing-back of a writeback.
3113 static void emitWriteback(CodeGenFunction &CGF,
3114  const CallArgList::Writeback &writeback) {
3115  const LValue &srcLV = writeback.Source;
3116  Address srcAddr = srcLV.getAddress();
3117  assert(!isProvablyNull(srcAddr.getPointer()) &&
3118  "shouldn't have writeback for provably null argument");
3119 
3120  llvm::BasicBlock *contBB = nullptr;
3121 
3122  // If the argument wasn't provably non-null, we need to null check
3123  // before doing the store.
3124  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3125  CGF.CGM.getDataLayout());
3126  if (!provablyNonNull) {
3127  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3128  contBB = CGF.createBasicBlock("icr.done");
3129 
3130  llvm::Value *isNull =
3131  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3132  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3133  CGF.EmitBlock(writebackBB);
3134  }
3135 
3136  // Load the value to writeback.
3137  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3138 
3139  // Cast it back, in case we're writing an id to a Foo* or something.
3140  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3141  "icr.writeback-cast");
3142 
3143  // Perform the writeback.
3144 
3145  // If we have a "to use" value, it's something we need to emit a use
3146  // of. This has to be carefully threaded in: if it's done after the
3147  // release it's potentially undefined behavior (and the optimizer
3148  // will ignore it), and if it happens before the retain then the
3149  // optimizer could move the release there.
3150  if (writeback.ToUse) {
3151  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3152 
3153  // Retain the new value. No need to block-copy here: the block's
3154  // being passed up the stack.
3155  value = CGF.EmitARCRetainNonBlock(value);
3156 
3157  // Emit the intrinsic use here.
3158  CGF.EmitARCIntrinsicUse(writeback.ToUse);
3159 
3160  // Load the old value (primitively).
3161  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3162 
3163  // Put the new value in place (primitively).
3164  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3165 
3166  // Release the old value.
3167  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3168 
3169  // Otherwise, we can just do a normal lvalue store.
3170  } else {
3171  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3172  }
3173 
3174  // Jump to the continuation block.
3175  if (!provablyNonNull)
3176  CGF.EmitBlock(contBB);
3177 }
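// Illustrative sketch (not part of CGCall.cpp): the ObjC pattern that creates
// a writeback. Passing the address of a __strong local to a parameter of type
// `NSError * __autoreleasing *` (hypothetical caller) compiles roughly as:
//
//   NSError *err = nil;
//   [obj doThing:&err];      // =>  id tmp = err;             (copy-in)
//                            //     [obj doThing:&tmp];
//                            //     err = tmp;                (emitWriteback)
//
// The icr.isnull / icr.writeback blocks above implement the null check on
// &err when it is not provably non-null.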
3178 
3179 static void emitWritebacks(CodeGenFunction &CGF,
3180  const CallArgList &args) {
3181  for (const auto &I : args.writebacks())
3182  emitWriteback(CGF, I);
3183 }
3184 
3185 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3186  const CallArgList &CallArgs) {
3187  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3188  CallArgs.getCleanupsToDeactivate();
3189  // Iterate in reverse to increase the likelihood of popping the cleanup.
3190  for (const auto &I : llvm::reverse(Cleanups)) {
3191  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3192  I.IsActiveIP->eraseFromParent();
3193  }
3194 }
3195 
3196 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3197  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3198  if (uop->getOpcode() == UO_AddrOf)
3199  return uop->getSubExpr();
3200  return nullptr;
3201 }
3202 
3203 /// Emit an argument that's being passed call-by-writeback. That is,
3204 /// we are passing the address of an __autoreleased temporary; it
3205 /// might be copy-initialized with the current value of the given
3206 /// address, but it will definitely be copied out of after the call.
3207 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3208  const ObjCIndirectCopyRestoreExpr *CRE) {
3209  LValue srcLV;
3210 
3211  // Make an optimistic effort to emit the address as an l-value.
3212  // This can fail if the argument expression is more complicated.
3213  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3214  srcLV = CGF.EmitLValue(lvExpr);
3215 
3216  // Otherwise, just emit it as a scalar.
3217  } else {
3218  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3219 
3220  QualType srcAddrType =
3221  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3222  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3223  }
3224  Address srcAddr = srcLV.getAddress();
3225 
3226  // The dest and src types don't necessarily match in LLVM terms
3227  // because of the crazy ObjC compatibility rules.
3228 
3229  llvm::PointerType *destType =
3230  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3231 
3232  // If the address is a constant null, just pass the appropriate null.
3233  if (isProvablyNull(srcAddr.getPointer())) {
3234  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3235  CRE->getType());
3236  return;
3237  }
3238 
3239  // Create the temporary.
3240  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3241  CGF.getPointerAlign(),
3242  "icr.temp");
3243  // Loading an l-value can introduce a cleanup if the l-value is __weak,
3244  // and that cleanup will be conditional if we can't prove that the l-value
3245  // isn't null, so we need to register a dominating point so that the cleanups
3246  // system will make valid IR.
3247  CodeGenFunction::ConditionalEvaluation condEval(CGF);
3248 
3249  // Zero-initialize it if we're not doing a copy-initialization.
3250  bool shouldCopy = CRE->shouldCopy();
3251  if (!shouldCopy) {
3252  llvm::Value *null =
3253  llvm::ConstantPointerNull::get(
3254  cast<llvm::PointerType>(destType->getElementType()));
3255  CGF.Builder.CreateStore(null, temp);
3256  }
3257 
3258  llvm::BasicBlock *contBB = nullptr;
3259  llvm::BasicBlock *originBB = nullptr;
3260 
3261  // If the address is *not* known to be non-null, we need to switch.
3262  llvm::Value *finalArgument;
3263 
3264  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3265  CGF.CGM.getDataLayout());
3266  if (provablyNonNull) {
3267  finalArgument = temp.getPointer();
3268  } else {
3269  llvm::Value *isNull =
3270  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3271 
3272  finalArgument = CGF.Builder.CreateSelect(isNull,
3273  llvm::ConstantPointerNull::get(destType),
3274  temp.getPointer(), "icr.argument");
3275 
3276  // If we need to copy, then the load has to be conditional, which
3277  // means we need control flow.
3278  if (shouldCopy) {
3279  originBB = CGF.Builder.GetInsertBlock();
3280  contBB = CGF.createBasicBlock("icr.cont");
3281  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3282  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3283  CGF.EmitBlock(copyBB);
3284  condEval.begin(CGF);
3285  }
3286  }
3287 
3288  llvm::Value *valueToUse = nullptr;
3289 
3290  // Perform a copy if necessary.
3291  if (shouldCopy) {
3292  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3293  assert(srcRV.isScalar());
3294 
3295  llvm::Value *src = srcRV.getScalarVal();
3296  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3297  "icr.cast");
3298 
3299  // Use an ordinary store, not a store-to-lvalue.
3300  CGF.Builder.CreateStore(src, temp);
3301 
3302  // If optimization is enabled, and the value was held in a
3303  // __strong variable, we need to tell the optimizer that this
3304  // value has to stay alive until we're doing the store back.
3305  // This is because the temporary is effectively unretained,
3306  // and so otherwise we can violate the high-level semantics.
3307  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3308  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3309  valueToUse = src;
3310  }
3311  }
3312 
3313  // Finish the control flow if we needed it.
3314  if (shouldCopy && !provablyNonNull) {
3315  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3316  CGF.EmitBlock(contBB);
3317 
3318  // Make a phi for the value to intrinsically use.
3319  if (valueToUse) {
3320  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3321  "icr.to-use");
3322  phiToUse->addIncoming(valueToUse, copyBB);
3323  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3324  originBB);
3325  valueToUse = phiToUse;
3326  }
3327 
3328  condEval.end(CGF);
3329  }
3330 
3331  args.addWriteback(srcLV, temp, valueToUse);
3332  args.add(RValue::get(finalArgument), CRE->getType());
3333 }
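// Approximate IR shape produced by emitWritebackArg when the source address
// is not provably non-null and shouldCopy is true (a sketch; types and names
// abbreviated):
//
//   %isnull       = icmp eq i8** %src, null
//   %icr.argument = select i1 %isnull, i8** null, i8** %icr.temp
//   br i1 %isnull, label %icr.cont, label %icr.copy
// icr.copy:                               ; conditional copy-in
//   %v = load i8*, i8** %src
//   store i8* %v, i8** %icr.temp
//   br label %icr.cont
// icr.cont:
//   ; plus an optional phi feeding the ARC "use" intrinsic via ToUse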
3334 
3335 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3336  assert(!StackBase);
3337 
3338  // Save the stack.
3339  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3340  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3341 }
3342 
3343 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3344  if (StackBase) {
3345  // Restore the stack after the call.
3346  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3347  CGF.Builder.CreateCall(F, StackBase);
3348  }
3349 }
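// A minimal sketch of the IR that the two methods above bracket an inalloca
// call with (assumed shape, abbreviated):
//
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>, align 4
//   ; ... initialize %argmem, make the call ...
//   call void @llvm.stackrestore(i8* %inalloca.save)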
3350 
3351 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3352  SourceLocation ArgLoc,
3353  AbstractCallee AC,
3354  unsigned ParmNum) {
3355  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3356  SanOpts.has(SanitizerKind::NullabilityArg)))
3357  return;
3358 
3359  // The param decl may be missing in a variadic function.
3360  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3361  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3362 
3363  // Prefer the nonnull attribute if it's present.
3364  const NonNullAttr *NNAttr = nullptr;
3365  if (SanOpts.has(SanitizerKind::NonnullAttribute))
3366  NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3367 
3368  bool CanCheckNullability = false;
3369  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3370  auto Nullability = PVD->getType()->getNullability(getContext());
3371  CanCheckNullability = Nullability &&
3372  *Nullability == NullabilityKind::NonNull &&
3373  PVD->getTypeSourceInfo();
3374  }
3375 
3376  if (!NNAttr && !CanCheckNullability)
3377  return;
3378 
3379  SourceLocation AttrLoc;
3380  SanitizerMask CheckKind;
3381  SanitizerHandler Handler;
3382  if (NNAttr) {
3383  AttrLoc = NNAttr->getLocation();
3384  CheckKind = SanitizerKind::NonnullAttribute;
3385  Handler = SanitizerHandler::NonnullArg;
3386  } else {
3387  AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3388  CheckKind = SanitizerKind::NullabilityArg;
3389  Handler = SanitizerHandler::NullabilityArg;
3390  }
3391 
3392  SanitizerScope SanScope(this);
3393  assert(RV.isScalar());
3394  llvm::Value *V = RV.getScalarVal();
3395  llvm::Value *Cond =
3396  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3397  llvm::Constant *StaticData[] = {
3398  EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3399  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3400  };
3401  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3402 }
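// Hypothetical trigger for the check above, compiled with
// -fsanitize=nonnull-attribute:
//
//   __attribute__((nonnull(1))) void use(int *p);
//   void f(int *q) { use(q); }   // emits icmp ne %q, null and a branch to
//                                // the nonnull_arg diagnostic handler
//
// Note that StaticData passes ArgNo + 1, so diagnostics are 1-based.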
3403 
3404 void CodeGenFunction::EmitCallArgs(
3405  CallArgList &Args, ArrayRef<QualType> ArgTypes,
3406  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3407  AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3408  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3409 
3410  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3411  // because arguments are destroyed left to right in the callee. As a special
3412  // case, there are certain language constructs that require left-to-right
3413  // evaluation, and in those cases we consider the evaluation order requirement
3414  // to trump the "destruction order is reverse construction order" guarantee.
3415  bool LeftToRight =
3416  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3417  ? Order == EvaluationOrder::ForceLeftToRight
3418  : Order != EvaluationOrder::ForceRightToLeft;
3419 
3420  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3421  RValue EmittedArg) {
3422  if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3423  return;
3424  auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3425  if (PS == nullptr)
3426  return;
3427 
3428  const auto &Context = getContext();
3429  auto SizeTy = Context.getSizeType();
3430  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3431  assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3432  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3433  EmittedArg.getScalarVal());
3434  Args.add(RValue::get(V), SizeTy);
3435  // If we're emitting args in reverse, be sure to do so with
3436  // pass_object_size, as well.
3437  if (!LeftToRight)
3438  std::swap(Args.back(), *(&Args.back() - 1));
3439  };
3440 
3441  // Insert a stack save if we're going to need any inalloca args.
3442  bool HasInAllocaArgs = false;
3443  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3444  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3445  I != E && !HasInAllocaArgs; ++I)
3446  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3447  if (HasInAllocaArgs) {
3448  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3449  Args.allocateArgumentMemory(*this);
3450  }
3451  }
3452 
3453  // Evaluate each argument in the appropriate order.
3454  size_t CallArgsStart = Args.size();
3455  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3456  unsigned Idx = LeftToRight ? I : E - I - 1;
3457  CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3458  unsigned InitialArgSize = Args.size();
3459  // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3460  // the argument and parameter match or the objc method is parameterized.
3461  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3462  getContext().hasSameUnqualifiedType((*Arg)->getType(),
3463  ArgTypes[Idx]) ||
3464  (isa<ObjCMethodDecl>(AC.getDecl()) &&
3465  isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3466  "Argument and parameter types don't match");
3467  EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3468  // In particular, we depend on it being the last arg in Args, and the
3469  // objectsize bits depend on there only being one arg if !LeftToRight.
3470  assert(InitialArgSize + 1 == Args.size() &&
3471  "The code below depends on only adding one arg per EmitCallArg");
3472  (void)InitialArgSize;
3473  // Since pointer arguments are never emitted as LValues, it is safe to
3474  // emit the non-null argument check for r-values only.
3475  if (!Args.back().hasLValue()) {
3476  RValue RVArg = Args.back().getKnownRValue();
3477  EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3478  ParamsToSkip + Idx);
3479  // @llvm.objectsize should never have side-effects and shouldn't need
3480  // destruction/cleanups, so we can safely "emit" it after its arg,
3481  // regardless of right-to-leftness.
3482  MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3483  }
3484  }
3485 
3486  if (!LeftToRight) {
3487  // Un-reverse the arguments we just evaluated so they match up with the LLVM
3488  // IR function.
3489  std::reverse(Args.begin() + CallArgsStart, Args.end());
3490  }
3491 }
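// Worked example of the ordering logic above (hypothetical C++):
//
//   struct S { S(); S(const S &); ~S(); };
//   void g(S a, S b);
//   g(S(), S());
//
// Under the MS C++ ABI the arguments are evaluated right to left (b, then a)
// so the callee can destroy them left to right; std::reverse then restores
// the IR argument order. Constructs that mandate left-to-right evaluation
// pass EvaluationOrder::ForceLeftToRight instead.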
3492 
3493 namespace {
3494 
3495 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3496  DestroyUnpassedArg(Address Addr, QualType Ty)
3497  : Addr(Addr), Ty(Ty) {}
3498 
3499  Address Addr;
3500  QualType Ty;
3501 
3502  void Emit(CodeGenFunction &CGF, Flags flags) override {
3503  QualType::DestructionKind DtorKind = Ty.isDestructedType();
3504  if (DtorKind == QualType::DK_cxx_destructor) {
3505  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3506  assert(!Dtor->isTrivial());
3507  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3508  /*Delegating=*/false, Addr);
3509  } else {
3510  CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3511  }
3512  }
3513 };
3514 
3515 struct DisableDebugLocationUpdates {
3516  CodeGenFunction &CGF;
3517  bool disabledDebugInfo;
3518  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3519  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3520  CGF.disableDebugInfo();
3521  }
3522  ~DisableDebugLocationUpdates() {
3523  if (disabledDebugInfo)
3524  CGF.enableDebugInfo();
3525  }
3526 };
3527 
3528 } // end anonymous namespace
3529 
3530 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3531  if (!HasLV)
3532  return RV;
3533  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3534  CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3535  LV.isVolatile());
3536  IsUsed = true;
3537  return RValue::getAggregate(Copy.getAddress());
3538 }
3539 
3540 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3541  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3542  if (!HasLV && RV.isScalar())
3543  CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3544  else if (!HasLV && RV.isComplex())
3545  CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3546  else {
3547  auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3548  LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3549  // We assume that call args are never copied into subobjects.
3550  CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3551  HasLV ? LV.isVolatileQualified()
3552  : RV.isVolatileQualified());
3553  }
3554  IsUsed = true;
3555 }
3556 
3557 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3558  QualType type) {
3559  DisableDebugLocationUpdates Dis(*this, E);
3560  if (const ObjCIndirectCopyRestoreExpr *CRE
3561  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3562  assert(getLangOpts().ObjCAutoRefCount);
3563  return emitWritebackArg(*this, args, CRE);
3564  }
3565 
3566  assert(type->isReferenceType() == E->isGLValue() &&
3567  "reference binding to unmaterialized r-value!");
3568 
3569  if (E->isGLValue()) {
3570  assert(E->getObjectKind() == OK_Ordinary);
3571  return args.add(EmitReferenceBindingToExpr(E), type);
3572  }
3573 
3574  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3575 
3576  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3577  // However, we still have to push an EH-only cleanup in case we unwind before
3578  // we make it to the call.
3579  if (HasAggregateEvalKind &&
3580  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3581  // If we're using inalloca, use the argument memory. Otherwise, use a
3582  // temporary.
3583  AggValueSlot Slot;
3584  if (args.isUsingInAlloca())
3585  Slot = createPlaceholderSlot(*this, type);
3586  else
3587  Slot = CreateAggTemp(type, "agg.tmp");
3588 
3589  bool DestroyedInCallee = true, NeedsEHCleanup = true;
3590  if (const auto *RD = type->getAsCXXRecordDecl())
3591  DestroyedInCallee = RD->hasNonTrivialDestructor();
3592  else
3593  NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3594 
3595  if (DestroyedInCallee)
3596  Slot.setExternallyDestructed();
3597 
3598  EmitAggExpr(E, Slot);
3599  RValue RV = Slot.asRValue();
3600  args.add(RV, type);
3601 
3602  if (DestroyedInCallee && NeedsEHCleanup) {
3603  // Create a no-op GEP between the placeholder and the cleanup so we can
3604  // RAUW it successfully. It also serves as a marker of the first
3605  // instruction where the cleanup is active.
3606  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3607  type);
3608  // This unreachable is a temporary marker which will be removed later.
3609  llvm::Instruction *IsActive = Builder.CreateUnreachable();
3610  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3611  }
3612  return;
3613  }
3614 
3615  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3616  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3617  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3618  assert(L.isSimple());
3619  args.addUncopiedAggregate(L, type);
3620  return;
3621  }
3622 
3623  args.add(EmitAnyExprToTemp(E), type);
3624 }
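// Example for the callee-destructed path above (hypothetical C++, MS ABI):
//
//   struct S { S(); ~S(); };
//   void f(S);
//   f(S());   // S() is built in the argument slot; the callee runs ~S(), but
//             // an EH-only DestroyUnpassedArg cleanup guards the window
//             // between construction and the call.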
3625 
3626 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3627  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3628  // implicitly widens null pointer constants that are arguments to varargs
3629  // functions to pointer-sized ints.
3630  if (!getTarget().getTriple().isOSWindows())
3631  return Arg->getType();
3632 
3633  if (Arg->getType()->isIntegerType() &&
3634  getContext().getTypeSize(Arg->getType()) <
3635  getContext().getTargetInfo().getPointerWidth(0) &&
3636  Arg->isNullPointerConstant(getContext(),
3637  Expr::NPC_ValueDependentIsNotNull)) {
3638  return getContext().getIntPtrType();
3639  }
3640 
3641  return Arg->getType();
3642 }
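// Example of the widening described above (hypothetical, Win64 target, where
// system headers define NULL as plain 0):
//
//   printf("%p", NULL);   // 32-bit literal 0 in a varargs slot
//
// getVarArgType() retypes the argument as intptr_t so the callee reads a
// full pointer-sized zero instead of an undefined upper half.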
3643 
3644 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3645 // optimizer it can aggressively ignore unwind edges.
3646 void
3647 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3648  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3649  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3650  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3651  CGM.getNoObjCARCExceptionsMetadata());
3652 }
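// With -fobjc-arc, optimization enabled, and no -fobjc-arc-exceptions, calls
// simply gain metadata, e.g. (sketch):
//
//   call void @objc_msgSend(...), !clang.arc.no_objc_arc_exceptions !0
//
// which licenses the ARC optimizer to ignore unwind edges through the call.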
3653 
3654 /// Emits a call to the given no-arguments nounwind runtime function.
3655 llvm::CallInst *
3656 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3657  const llvm::Twine &name) {
3658  return EmitNounwindRuntimeCall(callee, None, name);
3659 }
3660 
3661 /// Emits a call to the given nounwind runtime function.
3662 llvm::CallInst *
3663 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3664  ArrayRef<llvm::Value *> args,
3665  const llvm::Twine &name) {
3666  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3667  call->setDoesNotThrow();
3668  return call;
3669 }
3670 
3671 /// Emits a simple call (never an invoke) to the given no-arguments
3672 /// runtime function.
3673 llvm::CallInst *
3674 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3675  const llvm::Twine &name) {
3676  return EmitRuntimeCall(callee, None, name);
3677 }
3678 
3679 // Calls which may throw must have operand bundles indicating which funclet
3680 // they are nested within.
3681 SmallVector<llvm::OperandBundleDef, 1>
3682 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3683  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3684  // There is no need for a funclet operand bundle if we aren't inside a
3685  // funclet.
3686  if (!CurrentFuncletPad)
3687  return BundleList;
3688 
3689  // Skip intrinsics which cannot throw.
3690  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3691  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3692  return BundleList;
3693 
3694  BundleList.emplace_back("funclet", CurrentFuncletPad);
3695  return BundleList;
3696 }
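// Sketch of the resulting call-site form inside a cleanup funclet (assumed
// IR, abbreviated):
//
//   %pad = cleanuppad within none []
//   call void @runtime_fn() [ "funclet"(token %pad) ]
//
// Without the bundle, WinEH preparation could not tell which funclet the
// call belongs to.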
3697 
3698 /// Emits a simple call (never an invoke) to the given runtime function.
3699 llvm::CallInst *
3700 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3701  ArrayRef<llvm::Value *> args,
3702  const llvm::Twine &name) {
3703  llvm::CallInst *call =
3704  Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
3705  call->setCallingConv(getRuntimeCC());
3706  return call;
3707 }
3708 
3709 /// Emits a call or invoke to the given noreturn runtime function.
3710 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3711  ArrayRef<llvm::Value*> args) {
3712  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3713  getBundlesForFunclet(callee);
3714 
3715  if (getInvokeDest()) {
3716  llvm::InvokeInst *invoke =
3717  Builder.CreateInvoke(callee,
3718  getUnreachableBlock(),
3719  getInvokeDest(),
3720  args,
3721  BundleList);
3722  invoke->setDoesNotReturn();
3723  invoke->setCallingConv(getRuntimeCC());
3724  } else {
3725  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3726  call->setDoesNotReturn();
3727  call->setCallingConv(getRuntimeCC());
3728  Builder.CreateUnreachable();
3729  }
3730 }
3731 
3732 /// Emits a call or invoke instruction to the given nullary runtime function.
3733 llvm::CallSite
3734 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3735  const Twine &name) {
3736  return EmitRuntimeCallOrInvoke(callee, None, name);
3737 }
3738 
3739 /// Emits a call or invoke instruction to the given runtime function.
3740 llvm::CallSite
3741 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3742  ArrayRef<llvm::Value*> args,
3743  const Twine &name) {
3744  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3745  callSite.setCallingConv(getRuntimeCC());
3746  return callSite;
3747 }
3748 
3749 /// Emits a call or invoke instruction to the given function, depending
3750 /// on the current state of the EH stack.
3751 llvm::CallSite
3752 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3753  ArrayRef<llvm::Value *> Args,
3754  const Twine &Name) {
3755  llvm::BasicBlock *InvokeDest = getInvokeDest();
3756  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3757  getBundlesForFunclet(Callee);
3758 
3759  llvm::Instruction *Inst;
3760  if (!InvokeDest)
3761  Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3762  else {
3763  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3764  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3765  Name);
3766  EmitBlock(ContBB);
3767  }
3768 
3769  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3770  // optimizer it can aggressively ignore unwind edges.
3771  if (CGM.getLangOpts().ObjCAutoRefCount)
3772  AddObjCARCExceptionMetadata(Inst);
3773 
3774  return llvm::CallSite(Inst);
3775 }
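// The call/invoke choice above, sketched: with no active EH scope the result
// is a plain `call`; inside one it becomes (abbreviated IR)
//
//   invoke void @fn(...)
//           to label %invoke.cont unwind label %eh.dest
// invoke.cont:
//   ...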
3776 
3777 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3778  llvm::Value *New) {
3779  DeferredReplacements.push_back(std::make_pair(Old, New));
3780 }
3781 
3782 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3783  const CGCallee &Callee,
3784  ReturnValueSlot ReturnValue,
3785  const CallArgList &CallArgs,
3786  llvm::Instruction **callOrInvoke,
3787  SourceLocation Loc) {
3788  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3789 
3790  assert(Callee.isOrdinary() || Callee.isVirtual());
3791 
3792  // Handle struct-return functions by passing a pointer to the
3793  // location that we would like to return into.
3794  QualType RetTy = CallInfo.getReturnType();
3795  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3796 
3797  llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3798 
3799  // 1. Set up the arguments.
3800 
3801  // If we're using inalloca, insert the allocation after the stack save.
3802  // FIXME: Do this earlier rather than hacking it in here!
3803  Address ArgMemory = Address::invalid();
3804  const llvm::StructLayout *ArgMemoryLayout = nullptr;
3805  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3806  const llvm::DataLayout &DL = CGM.getDataLayout();
3807  ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3808  llvm::Instruction *IP = CallArgs.getStackBase();
3809  llvm::AllocaInst *AI;
3810  if (IP) {
3811  IP = IP->getNextNode();
3812  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3813  "argmem", IP);
3814  } else {
3815  AI = CreateTempAlloca(ArgStruct, "argmem");
3816  }
3817  auto Align = CallInfo.getArgStructAlignment();
3818  AI->setAlignment(Align.getQuantity());
3819  AI->setUsedWithInAlloca(true);
3820  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3821  ArgMemory = Address(AI, Align);
3822  }
3823 
3824  // Helper function to drill into the inalloca allocation.
3825  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3826  auto FieldOffset =
3827  CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3828  return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3829  };
3830 
3831  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3832  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3833 
3834  // If the call returns a temporary with struct return, create a temporary
3835  // alloca to hold the result, unless one is given to us.
3836  Address SRetPtr = Address::invalid();
3837  Address SRetAlloca = Address::invalid();
3838  llvm::Value *UnusedReturnSizePtr = nullptr;
3839  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3840  if (!ReturnValue.isNull()) {
3841  SRetPtr = ReturnValue.getValue();
3842  } else {
3843  SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3844  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3845  uint64_t size =
3846  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3847  UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3848  }
3849  }
3850  if (IRFunctionArgs.hasSRetArg()) {
3851  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3852  } else if (RetAI.isInAlloca()) {
3853  Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3854  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3855  }
3856  }
3857 
3858  Address swiftErrorTemp = Address::invalid();
3859  Address swiftErrorArg = Address::invalid();
3860 
3861  // Translate all of the arguments as necessary to match the IR lowering.
3862  assert(CallInfo.arg_size() == CallArgs.size() &&
3863  "Mismatch between function signature & arguments.");
3864  unsigned ArgNo = 0;
3865  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3866  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3867  I != E; ++I, ++info_it, ++ArgNo) {
3868  const ABIArgInfo &ArgInfo = info_it->info;
3869 
3870  // Insert a padding argument to ensure proper alignment.
3871  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3872  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3873  llvm::UndefValue::get(ArgInfo.getPaddingType());
3874 
3875  unsigned FirstIRArg, NumIRArgs;
3876  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3877 
3878  switch (ArgInfo.getKind()) {
3879  case ABIArgInfo::InAlloca: {
3880  assert(NumIRArgs == 0);
3881  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3882  if (I->isAggregate()) {
3883  // Replace the placeholder with the appropriate argument slot GEP.
3884  Address Addr = I->hasLValue()
3885  ? I->getKnownLValue().getAddress()
3886  : I->getKnownRValue().getAggregateAddress();
3887  llvm::Instruction *Placeholder =
3888  cast<llvm::Instruction>(Addr.getPointer());
3889  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3890  Builder.SetInsertPoint(Placeholder);
3891  Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3892  Builder.restoreIP(IP);
3893  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3894  } else {
3895  // Store the RValue into the argument struct.
3896  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3897  unsigned AS = Addr.getType()->getPointerAddressSpace();
3898  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3899  // There are some cases where a trivial bitcast is not avoidable. The
3900  // definition of a type later in a translation unit may change its type
3901  // from {}* to (%struct.foo*)*.
3902  if (Addr.getType() != MemType)
3903  Addr = Builder.CreateBitCast(Addr, MemType);
3904  I->copyInto(*this, Addr);
3905  }
3906  break;
3907  }
3908 
3909  case ABIArgInfo::Indirect: {
3910  assert(NumIRArgs == 1);
3911  if (!I->isAggregate()) {
3912  // Make a temporary alloca to pass the argument.
3913  Address Addr = CreateMemTempWithoutCast(
3914  I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3915  IRCallArgs[FirstIRArg] = Addr.getPointer();
3916 
3917  I->copyInto(*this, Addr);
3918  } else {
3919  // We want to avoid creating an unnecessary temporary+copy here;
3920  // however, we need one in three cases:
3921  // 1. If the argument is not byval, and we are required to copy the
3922  // source. (This case doesn't occur on any common architecture.)
3923  // 2. If the argument is byval, RV is not sufficiently aligned, and
3924  // we cannot force it to be sufficiently aligned.
3925  // 3. If the argument is byval, but RV is not located in default
3926  // or alloca address space.
3927  Address Addr = I->hasLValue()
3928  ? I->getKnownLValue().getAddress()
3929  : I->getKnownRValue().getAggregateAddress();
3930  llvm::Value *V = Addr.getPointer();
3931  CharUnits Align = ArgInfo.getIndirectAlign();
3932  const llvm::DataLayout *TD = &CGM.getDataLayout();
3933 
3934  assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3935  IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3936  TD->getAllocaAddrSpace()) &&
3937  "indirect argument must be in alloca address space");
3938 
3939  bool NeedCopy = false;
3940 
3941  if (Addr.getAlignment() < Align &&
3942  llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3943  Align.getQuantity()) {
3944  NeedCopy = true;
3945  } else if (I->hasLValue()) {
3946  auto LV = I->getKnownLValue();
3947  auto AS = LV.getAddressSpace();
3948  if ((!ArgInfo.getIndirectByVal() &&
3949  (LV.getAlignment() >=
3950  getContext().getTypeAlignInChars(I->Ty))) ||
3951  (ArgInfo.getIndirectByVal() &&
3952  ((AS != LangAS::Default && AS != LangAS::opencl_private &&
3953  AS != CGM.getASTAllocaAddressSpace())))) {
3954  NeedCopy = true;
3955  }
3956  }
3957  if (NeedCopy) {
3958  // Create an aligned temporary, and copy to it.
3959  Address AI = CreateMemTempWithoutCast(
3960  I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
3961  IRCallArgs[FirstIRArg] = AI.getPointer();
3962  I->copyInto(*this, AI);
3963  } else {
3964  // Skip the extra memcpy call.
3965  auto *T = V->getType()->getPointerElementType()->getPointerTo(
3966  CGM.getDataLayout().getAllocaAddrSpace());
3967  IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
3968  *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
3969  true);
3970  }
3971  }
3972  break;
3973  }
3974 
3975  case ABIArgInfo::Ignore:
3976  assert(NumIRArgs == 0);
3977  break;
3978 
3979  case ABIArgInfo::Extend:
3980  case ABIArgInfo::Direct: {
3981  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3982  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3983  ArgInfo.getDirectOffset() == 0) {
3984  assert(NumIRArgs == 1);
3985  llvm::Value *V;
3986  if (!I->isAggregate())
3987  V = I->getKnownRValue().getScalarVal();
3988  else
3989  V = Builder.CreateLoad(
3990  I->hasLValue() ? I->getKnownLValue().getAddress()
3991  : I->getKnownRValue().getAggregateAddress());
3992 
3993  // Implement swifterror by copying into a new swifterror argument.
3994  // We'll write back in the normal path out of the call.
3995  if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3996  == ParameterABI::SwiftErrorResult) {
3997  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
3998 
3999  QualType pointeeTy = I->Ty->getPointeeType();
4000  swiftErrorArg =
4001  Address(V, getContext().getTypeAlignInChars(pointeeTy));
4002 
4003  swiftErrorTemp =
4004  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4005  V = swiftErrorTemp.getPointer();
4006  cast<llvm::AllocaInst>(V)->setSwiftError(true);
4007 
4008  llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4009  Builder.CreateStore(errorValue, swiftErrorTemp);
4010  }
4011 
4012  // We might have to widen integers, but we should never truncate.
4013  if (ArgInfo.getCoerceToType() != V->getType() &&
4014  V->getType()->isIntegerTy())
4015  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4016 
4017  // If the argument doesn't match, perform a bitcast to coerce it. This
4018  // can happen due to trivial type mismatches.
4019  if (FirstIRArg < IRFuncTy->getNumParams() &&
4020  V->getType() != IRFuncTy->getParamType(FirstIRArg))
4021  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4022 
4023  IRCallArgs[FirstIRArg] = V;
4024  break;
4025  }
4026 
4027  // FIXME: Avoid the conversion through memory if possible.
4028  Address Src = Address::invalid();
4029  if (!I->isAggregate()) {
4030  Src = CreateMemTemp(I->Ty, "coerce");
4031  I->copyInto(*this, Src);
4032  } else {
4033  Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4034  : I->getKnownRValue().getAggregateAddress();
4035  }
4036 
4037  // If the value is offset in memory, apply the offset now.
4038  Src = emitAddressAtOffset(*this, Src, ArgInfo);
4039 
4040  // Fast-isel and the optimizer generally like scalar values better than
4041  // FCAs, so we flatten them if this is safe to do for this argument.
4042  llvm::StructType *STy =
4043  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4044  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4045  llvm::Type *SrcTy = Src.getType()->getElementType();
4046  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4047  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4048 
4049  // If the source type is smaller than the destination type of the
4050  // coerce-to logic, copy the source value into a temp alloca the size
4051  // of the destination type to allow loading all of it. The bits past
4052  // the source value are left undef.
4053  if (SrcSize < DstSize) {
4054  Address TempAlloca
4055  = CreateTempAlloca(STy, Src.getAlignment(),
4056  Src.getName() + ".coerce");
4057  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4058  Src = TempAlloca;
4059  } else {
4060  Src = Builder.CreateBitCast(Src,
4061  STy->getPointerTo(Src.getAddressSpace()));
4062  }
4063 
4064  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
4065  assert(NumIRArgs == STy->getNumElements());
4066  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4067  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
4068  Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
4069  llvm::Value *LI = Builder.CreateLoad(EltPtr);
4070  IRCallArgs[FirstIRArg + i] = LI;
4071  }
4072  } else {
4073  // In the simple case, just pass the coerced loaded value.
4074  assert(NumIRArgs == 1);
4075  IRCallArgs[FirstIRArg] =
4076  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4077  }
4078 
4079  break;
4080  }
4081 
4082  case ABIArgInfo::CoerceAndExpand: {
4083  auto coercionType = ArgInfo.getCoerceAndExpandType();
4084  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4085 
4086  llvm::Value *tempSize = nullptr;
4087  Address addr = Address::invalid();
4088  Address AllocaAddr = Address::invalid();
4089  if (I->isAggregate()) {
4090  addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4091  : I->getKnownRValue().getAggregateAddress();
4092 
4093  } else {
4094  RValue RV = I->getKnownRValue();
4095  assert(RV.isScalar()); // complex should always just be direct
4096 
4097  llvm::Type *scalarType = RV.getScalarVal()->getType();
4098  auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4099  auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4100 
4101  // Materialize to a temporary.
4102  addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4103  CharUnits::fromQuantity(std::max(
4104  layout->getAlignment(), scalarAlign)),
4105  "tmp",
4106  /*ArraySize=*/nullptr, &AllocaAddr);
4107  tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4108 
4109  Builder.CreateStore(RV.getScalarVal(), addr);
4110  }
4111 
4112  addr = Builder.CreateElementBitCast(addr, coercionType);
4113 
4114  unsigned IRArgPos = FirstIRArg;
4115  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4116  llvm::Type *eltType = coercionType->getElementType(i);
4117  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4118  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4119  llvm::Value *elt = Builder.CreateLoad(eltAddr);
4120  IRCallArgs[IRArgPos++] = elt;
4121  }
4122  assert(IRArgPos == FirstIRArg + NumIRArgs);
4123 
4124  if (tempSize) {
4125  EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4126  }
4127 
4128  break;
4129  }
4130 
4131  case ABIArgInfo::Expand:
4132  unsigned IRArgPos = FirstIRArg;
4133  ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4134  assert(IRArgPos == FirstIRArg + NumIRArgs);
4135  break;
4136  }
4137  }
4138 
4139  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4140  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4141 
4142  // If we're using inalloca, set up that argument.
4143  if (ArgMemory.isValid()) {
4144  llvm::Value *Arg = ArgMemory.getPointer();
4145  if (CallInfo.isVariadic()) {
4146  // When passing non-POD arguments by value to variadic functions, we will
4147  // end up with a variadic prototype and an inalloca call site. In such
4148  // cases, we can't do any parameter mismatch checks. Give up and bitcast
4149  // the callee.
4150  unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4151  auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4152  CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4153  } else {
4154  llvm::Type *LastParamTy =
4155  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4156  if (Arg->getType() != LastParamTy) {
4157 #ifndef NDEBUG
4158  // Assert that these structs have equivalent element types.
4159  llvm::StructType *FullTy = CallInfo.getArgStruct();
4160  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4161  cast<llvm::PointerType>(LastParamTy)->getElementType());
4162  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4163  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4164  DE = DeclaredTy->element_end(),
4165  FI = FullTy->element_begin();
4166  DI != DE; ++DI, ++FI)
4167  assert(*DI == *FI);
4168 #endif
4169  Arg = Builder.CreateBitCast(Arg, LastParamTy);
4170  }
4171  }
4172  assert(IRFunctionArgs.hasInallocaArg());
4173  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4174  }
4175 
4176  // 2. Prepare the function pointer.
4177 
4178  // If the callee is a bitcast of a non-variadic function to have a
4179  // variadic function pointer type, check to see if we can remove the
4180  // bitcast. This comes up with unprototyped functions.
4181  //
4182  // This makes the IR nicer, but more importantly it ensures that we
4183  // can inline the function at -O0 if it is marked always_inline.
4184  auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4185  llvm::FunctionType *CalleeFT =
4186  cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4187  if (!CalleeFT->isVarArg())
4188  return Ptr;
4189 
4190  llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4191  if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4192  return Ptr;
4193 
4194  llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4195  if (!OrigFn)
4196  return Ptr;
4197 
4198  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4199 
4200  // If the original type is variadic, or if any of the component types
4201  // disagree, we cannot remove the cast.
4202  if (OrigFT->isVarArg() ||
4203  OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4204  OrigFT->getReturnType() != CalleeFT->getReturnType())
4205  return Ptr;
4206 
4207  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4208  if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4209  return Ptr;
4210 
4211  return OrigFn;
4212  };
4213  CalleePtr = simplifyVariadicCallee(CalleePtr);
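// Example of the cast this removes (hypothetical K&R-style C):
//
//   void f();               // no prototype: call sites use type void (...)
//   void g(void) { f(); }   // callee is bitcast (void ()* @f to void (...)*)
//
// Stripping the constant bitcast lets the call target @f directly, so an
// always_inline f can still be inlined at -O0.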
4214 
4215  // 3. Perform the actual call.
4216 
4217  // Deactivate any cleanups that we're supposed to do immediately before
4218  // the call.
4219  if (!CallArgs.getCleanupsToDeactivate().empty())
4220  deactivateArgCleanupsBeforeCall(*this, CallArgs);
4221 
4222  // Assert that the arguments we computed match up. The IR verifier
4223  // will catch this, but this is a common enough source of problems
4224  // during IRGen changes that it's way better for debugging to catch
4225  // it ourselves here.
4226 #ifndef NDEBUG
4227  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4228  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4229  // Inalloca arguments can have a different type.
4230  if (IRFunctionArgs.hasInallocaArg() &&
4231  i == IRFunctionArgs.getInallocaArgNo())
4232  continue;
4233  if (i < IRFuncTy->getNumParams())
4234  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4235  }
4236 #endif
4237 
4238  // Compute the calling convention and attributes.
4239  unsigned CallingConv;
4240  llvm::AttributeList Attrs;
4241  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4242  Callee.getAbstractInfo(), Attrs, CallingConv,
4243  /*AttrOnCallSite=*/true);
4244 
4245  // Apply some call-site-specific attributes.
4246  // TODO: work this into building the attribute set.
4247 
4248  // Apply always_inline to all calls within flatten functions.
4249  // FIXME: should this really take priority over __try, below?
4250  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4251  !(Callee.getAbstractInfo().getCalleeDecl() &&
4252  Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
4253  Attrs =
4254  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4255  llvm::Attribute::AlwaysInline);
4256  }
4257 
4258  // Disable inlining inside SEH __try blocks.
4259  if (isSEHTryScope()) {
4260  Attrs =
4261  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4262  llvm::Attribute::NoInline);
4263  }
4264 
4265  // Decide whether to use a call or an invoke.
4266  bool CannotThrow;
4267  if (currentFunctionUsesSEHTry()) {
4268  // SEH cares about asynchronous exceptions, so everything can "throw."
4269  CannotThrow = false;
4270  } else if (isCleanupPadScope() &&
4271  EHPersonality::get(*this).isMSVCXXPersonality()) {
4272  // The MSVC++ personality will implicitly terminate the program if an
4273  // exception is thrown during a cleanup outside of a try/catch.
4274  // We don't need to model anything in IR to get this behavior.
4275  CannotThrow = true;
4276  } else {
4277  // Otherwise, nounwind call sites will never throw.
4278  CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4279  llvm::Attribute::NoUnwind);
4280  }
4281 
4282  // If we made a temporary, be sure to clean up after ourselves. Note that we
4283  // can't depend on being inside of an ExprWithCleanups, so we need to manually
4284  // pop this cleanup later on. Being eager about this is OK, since this
4285  // temporary is 'invisible' outside of the callee.
4286  if (UnusedReturnSizePtr)
4287  pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4288  UnusedReturnSizePtr);
4289 
4290  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4291 
4292  SmallVector<llvm::OperandBundleDef, 1> BundleList =
4293  getBundlesForFunclet(CalleePtr);
4294 
4295  // Emit the actual call/invoke instruction.
4296  llvm::CallSite CS;
4297  if (!InvokeDest) {
4298  CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4299  } else {
4300  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4301  CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4302  BundleList);
4303  EmitBlock(Cont);
4304  }
4305  llvm::Instruction *CI = CS.getInstruction();
4306  if (callOrInvoke)
4307  *callOrInvoke = CI;
4308 
4309  // Apply the attributes and calling convention.
4310  CS.setAttributes(Attrs);
4311  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4312 
4313  // Apply various metadata.
4314 
4315  if (!CI->getType()->isVoidTy())
4316  CI->setName("call");
4317 
4318  // Insert instrumentation or attach profile metadata at indirect call sites.
4319  // For more details, see the comment before the definition of
4320  // IPVK_IndirectCallTarget in InstrProfData.inc.
4321  if (!CS.getCalledFunction())
4322  PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4323  CI, CalleePtr);
4324 
4325  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4326  // optimizer it can aggressively ignore unwind edges.
4327  if (CGM.getLangOpts().ObjCAutoRefCount)
4328  AddObjCARCExceptionMetadata(CI);
4329 
4330  // Suppress tail calls if requested.
4331  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4332  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4333  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4334  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4335  }
4336 
4337  // 4. Finish the call.
4338 
4339  // If the call doesn't return, finish the basic block and clear the
4340  // insertion point; this allows the rest of IRGen to discard
4341  // unreachable code.
4342  if (CS.doesNotReturn()) {
4343  if (UnusedReturnSizePtr)
4344  PopCleanupBlock();
4345 
4346  // Strip away the noreturn attribute to better diagnose unreachable UB.
4347  if (SanOpts.has(SanitizerKind::Unreachable)) {
4348  if (auto *F = CS.getCalledFunction())
4349  F->removeFnAttr(llvm::Attribute::NoReturn);
4350  CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4351  llvm::Attribute::NoReturn);
4352  }
4353 
4354  EmitUnreachable(Loc);
4355  Builder.ClearInsertionPoint();
4356 
4357  // FIXME: For now, emit a dummy basic block because expr emitters in
4358  // general are not ready to handle emitting expressions at unreachable
4359  // points.
4360  EnsureInsertPoint();
4361 
4362  // Return a reasonable RValue.
4363  return GetUndefRValue(RetTy);
4364  }
4365 
4366  // Perform the swifterror writeback.
4367  if (swiftErrorTemp.isValid()) {
4368  llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4369  Builder.CreateStore(errorResult, swiftErrorArg);
4370  }
4371 
4372  // Emit any call-associated writebacks immediately. Arguably this
4373  // should happen after any return-value munging.
4374  if (CallArgs.hasWritebacks())
4375  emitWritebacks(*this, CallArgs);
4376 
4377  // The stack cleanup for inalloca arguments has to run out of the normal
4378  // lexical order, so deactivate it and run it manually here.
4379  CallArgs.freeArgumentMemory(*this);
4380 
4381  // Extract the return value.
4382  RValue Ret = [&] {
4383  switch (RetAI.getKind()) {
4384  case ABIArgInfo::CoerceAndExpand: {
4385  auto coercionType = RetAI.getCoerceAndExpandType();
4386  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4387 
4388  Address addr = SRetPtr;
4389  addr = Builder.CreateElementBitCast(addr, coercionType);
4390 
4391  assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4392  bool requiresExtract = isa<llvm::StructType>(CI->getType());
4393 
4394  unsigned unpaddedIndex = 0;
4395  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4396  llvm::Type *eltType = coercionType->getElementType(i);
4397  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4398  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4399  llvm::Value *elt = CI;
4400  if (requiresExtract)
4401  elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4402  else
4403  assert(unpaddedIndex == 0);
4404  Builder.CreateStore(elt, eltAddr);
4405  }
4406  // FALLTHROUGH
4407  LLVM_FALLTHROUGH;
4408  }
4409 
4410  case ABIArgInfo::InAlloca:
4411  case ABIArgInfo::Indirect: {
4412  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4413  if (UnusedReturnSizePtr)
4414  PopCleanupBlock();
4415  return ret;
4416  }
4417 
4418  case ABIArgInfo::Ignore:
4419  // If we are ignoring an argument that had a result, make sure to
4420  // construct the appropriate return value for our caller.
4421  return GetUndefRValue(RetTy);
4422 
4423  case ABIArgInfo::Extend:
4424  case ABIArgInfo::Direct: {
4425  llvm::Type *RetIRTy = ConvertType(RetTy);
4426  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4427  switch (getEvaluationKind(RetTy)) {
4428  case TEK_Complex: {
4429  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4430  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4431  return RValue::getComplex(std::make_pair(Real, Imag));
4432  }
4433  case TEK_Aggregate: {
4434  Address DestPtr = ReturnValue.getValue();
4435  bool DestIsVolatile = ReturnValue.isVolatile();
4436 
4437  if (!DestPtr.isValid()) {
4438  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4439  DestIsVolatile = false;
4440  }
4441  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4442  return RValue::getAggregate(DestPtr);
4443  }
4444  case TEK_Scalar: {
4445  // If the argument doesn't match, perform a bitcast to coerce it. This
4446  // can happen due to trivial type mismatches.
4447  llvm::Value *V = CI;
4448  if (V->getType() != RetIRTy)
4449  V = Builder.CreateBitCast(V, RetIRTy);
4450  return RValue::get(V);
4451  }
4452  }
4453  llvm_unreachable("bad evaluation kind");
4454  }
4455 
4456  Address DestPtr = ReturnValue.getValue();
4457  bool DestIsVolatile = ReturnValue.isVolatile();
4458 
4459  if (!DestPtr.isValid()) {
4460  DestPtr = CreateMemTemp(RetTy, "coerce");
4461  DestIsVolatile = false;
4462  }
4463 
4464  // If the value is offset in memory, apply the offset now.
4465  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4466  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4467 
4468  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4469  }
4470 
4471  case ABIArgInfo::Expand:
4472  llvm_unreachable("Invalid ABI kind for return argument");
4473  }
4474 
4475  llvm_unreachable("Unhandled ABIArgInfo::Kind");
4476  } ();
4477 
4478  // Emit the assume_aligned check on the return value.
4479  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4480  if (Ret.isScalar() && TargetDecl) {
4481  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4482  llvm::Value *OffsetValue = nullptr;
4483  if (const auto *Offset = AA->getOffset())
4484  OffsetValue = EmitScalarExpr(Offset);
4485 
4486  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4487  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4488  EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4489  OffsetValue);
4490  } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4491  llvm::Value *ParamVal =
4492  CallArgs[AA->getParamIndex().getLLVMIndex()].getRValue(
4493  *this).getScalarVal();
4494  EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4495  }
4496  }
4497 
4498  return Ret;
4499 }
4500 
4501 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4502  if (isVirtual()) {
4503  const CallExpr *CE = getVirtualCallExpr();
4504  return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4505  CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(),
4506  CE ? CE->getBeginLoc() : SourceLocation());
4507  }
4508 
4509  return *this;
4510 }
4511 
4512 /* VarArg handling */
4513 
4514 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4515  VAListAddr = VE->isMicrosoftABI()
4516  ? EmitMSVAListRef(VE->getSubExpr())
4517  : EmitVAListRef(VE->getSubExpr());
4518  QualType Ty = VE->getType();
4519  if (VE->isMicrosoftABI())
4520  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4521  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4522 }
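// Usage sketch (hypothetical C): for
//
//   int sum(int n, ...) {
//     va_list ap; va_start(ap, n);
//     int x = va_arg(ap, int);   // <- the VAArgExpr handled above
//     ...
//   }
//
// EmitVAListRef yields the address of `ap`, and the target's ABIInfo decides
// how the slot is addressed (EmitMSVAArg on the Microsoft ABI).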
Definition: CGCall.cpp:3782
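A hedged sketch of the call-emission flow, assuming a CGCallee 'Callee' is already in hand and 'ArgV', 'ArgTy', 'RetTy', and 'Loc' are placeholders:
  CallArgList Args;
  Args.add(RValue::get(ArgV), ArgTy);               // one scalar argument
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionCall(RetTy, Args);
  RValue Ret = EmitCall(FI, Callee, ReturnValueSlot(), Args,
                        /*callOrInvoke=*/nullptr, Loc);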
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
Represents a variable declaration or definition.
Definition: Decl.h:820
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:104
llvm::Instruction * getStackBase() const
Definition: CGCall.h:334
unsigned getNumParams() const
Definition: Type.h:3772
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
llvm::Value * getFunctionPointer() const
Definition: CGCall.h:178
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from the given source address, producing a value of the given LLVM type even when it differs from the source's element type.
Definition: CGCall.cpp:1216
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:6625
void setCoerceToType(llvm::Type *T)
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:3418
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:139
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition: CGCall.cpp:3351
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:234
llvm::Value * getPointer() const
Definition: Address.h:38
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:300
Address getValue() const
Definition: CGCall.h:381
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
Represents a parameter to a function.
Definition: Decl.h:1541
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:57
void add(RValue rvalue, QualType type)
Definition: CGCall.h:285
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert a clang calling convention to the corresponding LLVM calling convention.
Definition: CGCall.cpp:46
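A minimal sketch of the translation, assuming 'FPT' is a FunctionProtoType* and 'Fn' an llvm::Function* (both placeholders):
  CallingConv CC = FPT->getExtInfo().getCC();        // AST-level convention
  unsigned LLVMCC = ClangCallConvToLLVMCallConv(CC); // llvm::CallingConv value
  Fn->setCallingConv(LLVMCC);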
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Definition: TargetInfo.cpp:420
Represents a struct/union/class.
Definition: Decl.h:3572
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3343
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:348
An object to manage conditionally-evaluated expressions.
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2457
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1018
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:3179
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2785
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.h:3873
Address getAddress() const
Definition: CGValue.h:327
unsigned getRegParm() const
Definition: Type.h:3392
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:153
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:3932
llvm::Constant * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
field_range fields() const
Definition: Decl.h:3763
bool isVolatileQualified() const
Definition: CGValue.h:258
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2198
Represents a member of a struct/union/class.
Definition: Decl.h:2554
CharUnits getAlignment() const
Definition: CGValue.h:316
RequiredArgs getRequiredArgs() const
bool isUsingInAlloca() const
Returns whether we're using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:339
unsigned getFunctionScopeIndex() const
Returns the index of this parameter in its prototype or method scope.
Definition: Decl.h:1594
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:104
llvm::CallInst * EmitRuntimeCall(llvm::Value *callee, const Twine &name="")
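A hedged one-liner, with 'RTFn' an assumed llvm::Constant* naming some runtime helper:
  llvm::CallInst *Call = EmitRuntimeCall(RTFn, "rt"); // direct call with the runtime calling convention
  // For helpers known not to unwind, see EmitNounwindRuntimeCall below.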
bool isOrdinary() const
Definition: CGCall.h:169
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:266
CharUnits getArgStructAlignment() const
bool isReferenceType() const
Definition: Type.h:6224
Interesting information about a specific parameter that can't simply be reflected in the parameter's type...
Definition: Type.h:3557
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:81
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2188
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
Definition: CGCall.cpp:3207
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:514
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
bool isVirtual() const
Definition: CGCall.h:187
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Decl.h:746
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:321
bool getProducesResult() const
Definition: Type.h:3387
llvm::FunctionType * getFunctionType() const
Definition: CGCall.h:203
bool isGLValue() const
Definition: Expr.h:252
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:285
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment...
static bool hasScalarEvaluationKind(QualType T)
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2578
void copyInto(CodeGenFunction &CGF, Address A) const
Definition: CGCall.cpp:3540
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:157
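A sketch, assuming 'Addr' is an Address of some aggregate temporary:
  Address Bytes = Builder.CreateElementBitCast(Addr, Int8Ty, "bytes");
  // 'Bytes' points at i8 but keeps the alignment recorded in 'Addr'.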
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
uint32_t Offset
Definition: CacheTokens.cpp:43
llvm::StructType * getCoerceAndExpandType() const
bool hasConstructorVariants() const
Does this ABI have different entrypoints for complete-object and base-subobject constructors?
Definition: TargetCXXABI.h:215
Wrapper for source info for functions.
Definition: TypeLoc.h:1402
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:67
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:109
unsigned getInAllocaFieldIndex() const
const_arg_iterator arg_begin() const
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:66
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:390
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:1816
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:274
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:134
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
Definition: CGCall.cpp:468
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
Definition: CGCall.cpp:702
Values of this type can never be null.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:85
bool isSimple() const
Definition: CGValue.h:252
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:273
bool isInstance() const
Definition: DeclCXX.h:2079
An ordinary object is located at an address in memory.
Definition: Specifiers.h:126
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1674
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:106
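A sketch with a placeholder QualType 'Ty'; unlike CreateMemTemp above, this works on an already-lowered LLVM type and leaves alignment to the caller:
  llvm::AllocaInst *Slot = CreateTempAlloca(ConvertType(Ty), "slot");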
FunctionType::ExtInfo getExtInfo() const
QualType getReturnType() const
Definition: DeclObjC.h:331
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:737
bool getNoReturn() const
Definition: Type.h:3386
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:84
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:71
bool getNoCallerSavedRegs() const
Definition: Type.h:3388
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
Definition: CGCall.cpp:3557
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:510
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD)
Derives the 'this' type for codegen purposes, i.e.
Definition: CGCall.cpp:73
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Definition: TargetInfo.h:305
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:3445
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
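A hedged sketch of a scalar round-trip, with placeholder Addresses 'Src'/'Dst', a QualType 'Ty', and a SourceLocation 'Loc':
  llvm::Value *V = EmitLoadOfScalar(Src, /*Volatile=*/false, Ty, Loc);
  EmitStoreOfScalar(V, Dst, /*Volatile=*/false, Ty);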
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
Definition: CGCall.cpp:499
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
Definition: Type.h:3501
bool hasAttr() const
Definition: DeclBase.h:544
CanQualType getReturnType() const
Const iterator for iterating over Stmt * arrays that contain only Expr *.
Definition: Stmt.h:359
bool isValid() const
Definition: Address.h:36
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1627
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3536
llvm::CallInst * EmitNounwindRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:4157
const TargetCodeGenInfo & getTargetCodeGenInfo()
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:39
writeback_const_range writebacks() const
Definition: CGCall.h:317
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:306
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
Definition: CGCall.cpp:3056
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4134
Address Temporary
The temporary alloca.
Definition: CGCall.h:271
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'this'...
Definition: CGCXXABI.h:107
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:274
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:3035
Expr - This represents one expression.
Definition: Expr.h:106
bool isVariadic() const
Whether this function is variadic.
Definition: Decl.cpp:2652
static Address invalid()
Definition: Address.h:35
llvm::Type * getUnpaddedCoerceAndExpandType() const
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:3030
bool useObjCFPRetForRealType(RealType T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
Definition: TargetInfo.h:705
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:88
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:66
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
Definition: CGCall.cpp:695
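A sketch: the nullary signature is a convenient entry point into the 'arrange' family; lowering the result to an llvm::FunctionType via CodeGenTypes::GetFunctionType is assumed here, as that member is not listed on this page:
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction(); // void ()
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);       // assumed helper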
bool getHasRegParm() const
Definition: Type.h:3390
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:6688
bool isObjCRetainableType() const
Definition: Type.cpp:3942
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2705
llvm::Constant * objc_retain
id objc_retain(id);
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:44
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
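A sketch of the CharUnits round-trip, using only the members documented on this page:
  CharUnits Align = CharUnits::fromQuantity(8); // eight characters (bytes)
  assert(!Align.isZero());                      // isZero tests the quantity
  int64_t Raw = Align.getQuantity();            // back to the raw integer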
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:356
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2566
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition: CGCall.cpp:3682
A class for recording the number of arguments that a function signature requires. ...
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type...
Definition: CGCall.cpp:1505
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:640
QualType getType() const
Definition: Expr.h:128
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1346
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes &#39;this&#39; as the first parameter followed by varargs.
Definition: CGCall.cpp:529
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2723
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:197
llvm::PointerType * AllocaInt8PtrTy
void Profile(llvm::FoldingSetNodeID &ID)
UnaryOperator - This represents the unary-expression's (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1865
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:2010
ASTContext & getContext() const
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:414
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1162
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:236
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:35
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1268
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:412
CanQualType getCanonicalTypeUnqualified() const
LValue getKnownLValue() const
Definition: CGCall.h:240
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Definition: Type.h:4249
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store of the given value to the given destination address, where the value's type and the destination's element type may differ.
Definition: CGCall.cpp:1293
Enumerates target-specific builtins in their own namespaces within namespace clang.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:142
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:168
Kind
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
Definition: CGCall.cpp:1510
CanProxy< U > castAs() const
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:3196
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant...
Definition: Expr.cpp:3368
Encodes a location in the source.
QualType getReturnType() const
Definition: Type.h:3469
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2072
A saved depth on the scope stack.
Definition: EHScopeStack.h:107
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:290
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition: CGCall.cpp:3741
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1246
CallingConv getCC() const
Definition: Type.h:3399
const Decl * getDecl() const
Definition: GlobalDecl.h:64
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
Definition: ASTContext.h:1863
An aggregate value slot.
Definition: CGValue.h:437
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
Definition: CGCall.cpp:455
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2051
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2417
const_arg_iterator arg_end() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
ObjCEntrypoints & getObjCEntrypoints() const
CoerceAndExpand - Only valid for aggregate argument types.
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:3335
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:713
CanQualType VoidTy
Definition: ASTContext.h:1023
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
bool isAnyPointerType() const
Definition: Type.h:6216
An aligned address.
Definition: Address.h:25
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after...
Definition: Type.h:1159
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Definition: TargetInfo.h:711
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:178
All available information about a concrete callee.
Definition: CGCall.h:67
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:364
Complete object dtor.
Definition: ABI.h:36
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
Definition: CGCall.cpp:1527
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1688
bool hasFlexibleArrayMember() const
Definition: Decl.h:3626
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3807
CXXCtorType
C++ constructor types.
Definition: ABI.h:25
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
Definition: CGCall.cpp:620
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:554
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1126
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:356
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:59
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
const TargetInfo & getTarget() const
Definition: CodeGenTypes.h:176
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2376
ExtInfo getExtInfo() const
Definition: Type.h:3480
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:93
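A sketch, with 'Re' and 'Im' assumed llvm::Value*s for the two components:
  RValue RV = RValue::getComplex(Re, Im);
  ComplexPairTy Parts = RV.getComplexVal(); // {real, imag}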
CodeGenFunction::ComplexPairTy ComplexPairTy
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
Definition: CGBuilder.h:172
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:92
LValue Source
The original argument.
Definition: CGCall.h:268
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type...
Definition: CGCall.cpp:431
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args)
Emits a call or invoke to the given noreturn runtime function.
Definition: CGCall.cpp:3710
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:91
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:1000
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP, const FunctionDecl *FD)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:167
void EmitARCIntrinsicUse(ArrayRef< llvm::Value *> values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition: CGObjC.cpp:1806
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:70
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2171
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
RValue getRValue(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3530
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:796
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:108
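A sketch: copying one scalar between two placeholder Address values with the alignment-aware wrappers rather than raw IRBuilder calls:
  llvm::Value *Tmp = Builder.CreateLoad(Src, "copy");
  Builder.CreateStore(Tmp, Dst); // both honor the alignments the Addresses carry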
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
Definition: TargetInfo.cpp:401
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
uint64_t SanitizerMask
Definition: Sanitizers.h:26
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:4239
Complex values, per C99 6.2.5p11.
Definition: Type.h:2437
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:77
static bool classof(const OMPClause *T)
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:2024
QualType getCanonicalTypeInternal() const
Definition: Type.h:2318
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:6473
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable &#39;self&#39;, remove it.
Definition: CGCall.cpp:2666
CharUnits getIndirectAlign() const
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:44
T * getAttr() const
Definition: DeclBase.h:540
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:52
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:645
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:120
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:62
Expand - Only valid for aggregate argument types.
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:2633
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type *>::iterator &TI)
getExpandedTypes - Expand the given type into the sequence of LLVM IR types it is split into when passed with the Expand ABI kind.
Definition: CGCall.cpp:978
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:903
bool isParamDestroyedInCallee() const
Definition: Decl.h:3710
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:445
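A sketch of the usual block-introduction idiom inside a CodeGenFunction member:
  llvm::BasicBlock *ContBB = createBasicBlock("cont");
  Builder.CreateBr(ContBB); // terminate the current block
  EmitBlock(ContBB);        // append 'cont' and continue emitting there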
Represents a base class of a C++ class.
Definition: DeclCXX.h:192
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2074
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:2034
ASTContext & getContext() const
Definition: CodeGenTypes.h:174
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:134
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:526
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl...
Definition: CGCall.cpp:1672
LangAS getAddressSpace() const
Definition: CGValue.h:314
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
RValue getKnownRValue() const
Definition: CGCall.h:244
Represents a C++ struct/union/class.
Definition: DeclCXX.h:308
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(const CXXMethodDecl *MD)
Get the type of the implicit "this" parameter used by a method.
Definition: CGCXXABI.h:338
bool isVoidType() const
Definition: Type.h:6439
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2209
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:6015
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1232
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.