//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}
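
// Illustrative example (not part of the original source): for a declaration
// such as
//
//   void dispatch(int) __attribute__((fastcall));
//
// Sema records CC_X86FastCall in the function type's ExtInfo, and the switch
// above maps it to llvm::CallingConv::X86_FastCall when the IR function is
// emitted. Conventions LLVM has no analogue for (e.g. the __pascal case)
// deliberately fall back to llvm::CallingConv::C.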

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}
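
// Illustrative example (not part of the original source): a prototype using
// pass_object_size, e.g.
//
//   void fill(char *buf __attribute__((pass_object_size(0))));
//
// gets one extra size_t parameter appended immediately after 'buf' by the
// loop above, so the lowered signature carries both the pointer and the
// statically computed object size.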

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}
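
// Illustrative example (not part of the original source): for
//
//   struct S { int get() const; };
//
// arrangeCXXMethodDeclaration produces a CGFunctionInfo whose first argument
// is the implicit 'S *this' supplied via arrangeCXXMethodType, followed by
// the declared parameters; a static member function takes the free-function
// path instead and gets no 'this' argument.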

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(MD, Type, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}
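
// Illustrative example (not part of the original source): on ABIs where
// TheCXXABI.HasThisReturn() is true for constructors (e.g. the ARM variant
// of the Itanium C++ ABI), a complete constructor for 'struct S' is arranged
// to return 'S *' (the 'this' pointer) rather than void, which lets callers
// reuse the returned pointer without reloading it.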

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}
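
// Illustrative example (not part of the original source): for a K&R-style
// call through an unprototyped function
//
//   void f();
//   ...
//   f(1, 2.0);
//
// there is no FunctionProtoType, so on targets whose isNoProtoCallVariadic()
// returns true both arguments are treated as required while the call still
// nominally allows variadics.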

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}
681 
684  getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
686 }

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}
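
// Illustrative example (not part of the original source): given the variadic
// declaration 'int printf(const char *, ...);', its declaration-level
// CGFunctionInfo has one required argument. For the call
//
//   printf("%d %s\n", n, s);
//
// arrangeCall re-arranges the signature with all three actual argument types
// while keeping the original RequiredArgs, so the ABI code still knows which
// arguments are variadic.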

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}
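
// Note on the caching above (illustrative, not part of the original source):
// CGFunctionInfo objects are uniqued in the FunctionInfos FoldingSet, keyed
// by everything that feeds Profile(). Two calls such as
//
//   const CGFunctionInfo &A = CGT.arrangeNullaryFunction();
//   const CGFunctionInfo &B = CGT.arrangeNullaryFunction();
//
// therefore yield the same object (&A == &B), and the potentially expensive
// ABI computation runs only once per distinct signature.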

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};
struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace
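
// Illustrative example (not part of the original source): with the hierarchy
// above, a parameter of type
//
//   struct Point { int x; int y; };
//
// classified as ABIArgInfo::Expand yields a RecordExpansion holding its two
// FieldDecls, so the lowered LLVM function receives two separate i32
// arguments instead of one aggregate.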

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}
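
// Worked example (illustrative, not part of the original source): for
//
//   struct W { int a[2]; _Complex float c; };
//
// getExpansionSize recurses into the record: the array contributes
// 2 * 1 = 2 (two ints) and the complex member contributes 2 (real and
// imaginary parts), for a total of 4 expanded IR arguments.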

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
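
// Worked example (illustrative, not part of the original source): coercing
// an i64 holding 0xAABBCCDD11223344 down to i32 emits
//
//   big-endian:    lshr i64 %v, 32, then trunc  -> 0xAABBCCDD
//   little-endian: trunc i64 %v to i32          -> 0x11223344
//
// matching what a store of the i64 followed by an i32 load from the same
// address would produce on each target.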

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.AllocaInt8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.AllocaInt8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                             CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace
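
// Illustrative example (not part of the original source): for a function
// whose return value is passed indirectly and whose only parameter expands
// to two scalars, say 'Big f(Point p)' with Point = { int x; int y; }, the
// mapping is:
//
//   IR arg 0: sret pointer for the Big result
//   IR arg 1: p.x
//   IR arg 2: p.y
//
// so getSRetArgNo() returns 0 and getIRArgs(0) returns {1, 2}.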

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is alloca addr space.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo(
          CGM.getDataLayout().getAllocaAddrSpace());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}
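
// Illustrative example (not part of the original source): a C++ function
// returning a non-trivially-copyable class by value, e.g.
//
//   struct S { S(const S &); int x; };
//   S make();
//
// is given an Indirect return under the Itanium C++ ABI, so GetFunctionType
// produces the IR type 'void (%struct.S*)': the caller allocates the result
// slot and passes its address as the leading argument (marked sret when
// attributes are applied).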

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}
1667 
1668 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1669  llvm::AttrBuilder &FuncAttrs,
1670  const FunctionProtoType *FPT) {
1671  if (!FPT)
1672  return;
1673 
1674  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1675  FPT->isNothrow(Ctx))
1676  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1677 }
1678 
1679 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1680  bool AttrOnCallSite,
1681  llvm::AttrBuilder &FuncAttrs) {
1682  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1683  if (!HasOptnone) {
1684  if (CodeGenOpts.OptimizeSize)
1685  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1686  if (CodeGenOpts.OptimizeSize == 2)
1687  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1688  }
1689 
1690  if (CodeGenOpts.DisableRedZone)
1691  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1692  if (CodeGenOpts.NoImplicitFloat)
1693  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1694 
1695  if (AttrOnCallSite) {
1696  // Attributes that should go on the call site only.
1697  if (!CodeGenOpts.SimplifyLibCalls ||
1698  CodeGenOpts.isNoBuiltinFunc(Name.data()))
1699  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1700  if (!CodeGenOpts.TrapFuncName.empty())
1701  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1702  } else {
1703  // Attributes that should go on the function, but not the call site.
1704  if (!CodeGenOpts.DisableFPElim) {
1705  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1706  } else if (CodeGenOpts.OmitLeafFramePointer) {
1707  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1708  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1709  } else {
1710  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1711  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1712  }
1713 
1714  FuncAttrs.addAttribute("less-precise-fpmad",
1715  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1716 
1717  if (!CodeGenOpts.FPDenormalMode.empty())
1718  FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1719 
1720  FuncAttrs.addAttribute("no-trapping-math",
1721  llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1722 
1723  // TODO: Are these all needed?
1724  // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1725  FuncAttrs.addAttribute("no-infs-fp-math",
1726  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1727  FuncAttrs.addAttribute("no-nans-fp-math",
1728  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1729  FuncAttrs.addAttribute("unsafe-fp-math",
1730  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1731  FuncAttrs.addAttribute("use-soft-float",
1732  llvm::toStringRef(CodeGenOpts.SoftFloat));
1733  FuncAttrs.addAttribute("stack-protector-buffer-size",
1734  llvm::utostr(CodeGenOpts.SSPBufferSize));
1735  FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1736  llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1737  FuncAttrs.addAttribute(
1738  "correctly-rounded-divide-sqrt-fp-math",
1739  llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1740 
1741  // TODO: Reciprocal estimate codegen options should apply to instructions?
1742  const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1743  if (!Recips.empty())
1744  FuncAttrs.addAttribute("reciprocal-estimates",
1745  llvm::join(Recips, ","));
1746 
1747  if (!CodeGenOpts.PreferVectorWidth.empty() &&
1748  CodeGenOpts.PreferVectorWidth != "none")
1749  FuncAttrs.addAttribute("prefer-vector-width",
1750  CodeGenOpts.PreferVectorWidth);
1751 
1752  if (CodeGenOpts.StackRealignment)
1753  FuncAttrs.addAttribute("stackrealign");
1754  if (CodeGenOpts.Backchain)
1755  FuncAttrs.addAttribute("backchain");
1756  }
1757 
1758  if (getLangOpts().assumeFunctionsAreConvergent()) {
1759  // Conservatively, mark all functions and calls in CUDA and OpenCL as
1760  // convergent (meaning, they may call an intrinsically convergent op, such
1761  // as __syncthreads() / barrier(), and so can't have certain optimizations
1762  // applied around them). LLVM will remove this attribute where it safely
1763  // can.
1764  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1765  }
1766 
1767  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1768  // Exceptions aren't supported in CUDA device code.
1769  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1770 
1771  // Respect -fcuda-flush-denormals-to-zero.
1772  if (getLangOpts().CUDADeviceFlushDenormalsToZero)
1773  FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1774  }
1775 }
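
// Illustrative example (hypothetical invocation, not part of this file):
// compiling a TU with "clang -Oz -mno-red-zone" reaches this function with
// OptimizeSize == 2 and DisableRedZone set, so each emitted definition
// typically carries at least:
//
//   attributes #0 = { minsize optsize noredzone ... }
//
// since a nonzero OptimizeSize adds OptimizeForSize and the == 2 case adds
// MinSize on top of it.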
1776 
1777 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1778  llvm::AttrBuilder FuncAttrs;
1779  ConstructDefaultFnAttrList(F.getName(),
1780  F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1781  /* AttrOnCallsite = */ false, FuncAttrs);
1782  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1783 }
1784 
1785 void CodeGenModule::ConstructAttributeList(
1786  StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1787  llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1788  llvm::AttrBuilder FuncAttrs;
1789  llvm::AttrBuilder RetAttrs;
1790 
1791  CallingConv = FI.getEffectiveCallingConvention();
1792  if (FI.isNoReturn())
1793  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1794 
1795  // If we have information about the function prototype, we can learn
1796  // attributes from there.
1797  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1798  CalleeInfo.getCalleeFunctionProtoType());
1799 
1800  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1801 
1802  bool HasOptnone = false;
1803  // FIXME: handle sseregparm someday...
1804  if (TargetDecl) {
1805  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1806  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1807  if (TargetDecl->hasAttr<NoThrowAttr>())
1808  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1809  if (TargetDecl->hasAttr<NoReturnAttr>())
1810  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1811  if (TargetDecl->hasAttr<ColdAttr>())
1812  FuncAttrs.addAttribute(llvm::Attribute::Cold);
1813  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1814  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1815  if (TargetDecl->hasAttr<ConvergentAttr>())
1816  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1817 
1818  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1819  AddAttributesFromFunctionProtoType(
1820  getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1821  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1822  // These attributes are not inherited by overriders.
1823  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1824  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1825  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1826  }
1827 
1828  // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1829  if (TargetDecl->hasAttr<ConstAttr>()) {
1830  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1831  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1832  } else if (TargetDecl->hasAttr<PureAttr>()) {
1833  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1834  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1835  } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1836  FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1837  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1838  }
1839  if (TargetDecl->hasAttr<RestrictAttr>())
1840  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1841  if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1842  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1843  if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1844  FuncAttrs.addAttribute("no_caller_saved_registers");
1845 
1846  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1847  if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1848  Optional<unsigned> NumElemsParam;
1849  // alloc_size args are base-1, 0 means not present.
1850  if (unsigned N = AllocSize->getNumElemsParam())
1851  NumElemsParam = N - 1;
1852  FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
1853  NumElemsParam);
1854  }
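
// Illustrative example (hypothetical declaration, not part of this file):
//
//   void *my_calloc(int count, int size)
//       __attribute__((alloc_size(2, 1)));
//
// Here getElemSizeParam() is 2 and getNumElemsParam() is 1 (both base-1),
// so the code above emits the IR attribute allocsize(1, 0) using base-0
// parameter indices.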
1855  }
1856 
1857  ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1858 
1859  if (CodeGenOpts.EnableSegmentedStacks &&
1860  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1861  FuncAttrs.addAttribute("split-stack");
1862 
1863  // Add NonLazyBind attribute to function declarations when -fno-plt
1864  // is used.
1865  if (TargetDecl && CodeGenOpts.NoPLT) {
1866  if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1867  if (!Fn->isDefined() && !AttrOnCallSite) {
1868  FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1869  }
1870  }
1871  }
1872 
1873  if (!AttrOnCallSite) {
1874  bool DisableTailCalls =
1875  CodeGenOpts.DisableTailCalls ||
1876  (TargetDecl && (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1877  TargetDecl->hasAttr<AnyX86InterruptAttr>()));
1878  FuncAttrs.addAttribute("disable-tail-calls",
1879  llvm::toStringRef(DisableTailCalls));
1880 
1881  // Add target-cpu and target-features attributes to functions. If
1882  // we have a decl for the function and it has a target attribute then
1883  // parse that and add it to the feature set.
1884  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1885  std::vector<std::string> Features;
1886  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
1887  if (FD && FD->hasAttr<TargetAttr>()) {
1888  llvm::StringMap<bool> FeatureMap;
1889  getFunctionFeatureMap(FeatureMap, FD);
1890 
1891  // Produce the canonical string for this set of features.
1892  for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
1893  ie = FeatureMap.end();
1894  it != ie; ++it)
1895  Features.push_back((it->second ? "+" : "-") + it->first().str());
1896 
1897  // Now add the target-cpu and target-features to the function.
1898  // While we populated the feature map above, we still need to
1899  // get and parse the target attribute so we can get the cpu for
1900  // the function.
1901  const auto *TD = FD->getAttr<TargetAttr>();
1902  TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
1903  if (ParsedAttr.Architecture != "" &&
1904  getTarget().isValidCPUName(ParsedAttr.Architecture))
1905  TargetCPU = ParsedAttr.Architecture;
1906  } else {
1907  // Otherwise just add the existing target cpu and target features to the
1908  // function.
1909  Features = getTarget().getTargetOpts().Features;
1910  }
1911 
1912  if (TargetCPU != "")
1913  FuncAttrs.addAttribute("target-cpu", TargetCPU);
1914  if (!Features.empty()) {
1915  std::sort(Features.begin(), Features.end());
1916  FuncAttrs.addAttribute(
1917  "target-features",
1918  llvm::join(Features, ","));
1919  }
1920  }
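
// Illustrative example (hypothetical function, not part of this file):
//
//   __attribute__((target("avx2"))) void hot_loop(void);
//
// takes the FD->hasAttr<TargetAttr>() path above and typically produces a
// sorted, comma-joined "target-features"="+avx2,..." string plus a
// "target-cpu" attribute on the emitted function definition.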
1921 
1922  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1923 
1924  QualType RetTy = FI.getReturnType();
1925  const ABIArgInfo &RetAI = FI.getReturnInfo();
1926  switch (RetAI.getKind()) {
1927  case ABIArgInfo::Extend:
1928  if (RetTy->hasSignedIntegerRepresentation())
1929  RetAttrs.addAttribute(llvm::Attribute::SExt);
1930  else if (RetTy->hasUnsignedIntegerRepresentation())
1931  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1932  // FALL THROUGH
1933  case ABIArgInfo::Direct:
1934  if (RetAI.getInReg())
1935  RetAttrs.addAttribute(llvm::Attribute::InReg);
1936  break;
1937  case ABIArgInfo::Ignore:
1938  break;
1939 
1940  case ABIArgInfo::InAlloca:
1941  case ABIArgInfo::Indirect: {
1942  // inalloca and sret disable readnone and readonly
1943  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1944  .removeAttribute(llvm::Attribute::ReadNone);
1945  break;
1946  }
1947 
1948  case ABIArgInfo::CoerceAndExpand:
1949  break;
1950 
1951  case ABIArgInfo::Expand:
1952  llvm_unreachable("Invalid ABI kind for return argument");
1953  }
1954 
1955  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1956  QualType PTy = RefTy->getPointeeType();
1957  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1958  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1959  .getQuantity());
1960  else if (getContext().getTargetAddressSpace(PTy) == 0)
1961  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1962  }
1963 
1964  bool hasUsedSRet = false;
1965  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
1966 
1967  // Attach attributes to sret.
1968  if (IRFunctionArgs.hasSRetArg()) {
1969  llvm::AttrBuilder SRETAttrs;
1970  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1971  hasUsedSRet = true;
1972  if (RetAI.getInReg())
1973  SRETAttrs.addAttribute(llvm::Attribute::InReg);
1974  ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
1975  llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
1976  }
1977 
1978  // Attach attributes to inalloca argument.
1979  if (IRFunctionArgs.hasInallocaArg()) {
1980  llvm::AttrBuilder Attrs;
1981  Attrs.addAttribute(llvm::Attribute::InAlloca);
1982  ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
1983  llvm::AttributeSet::get(getLLVMContext(), Attrs);
1984  }
1985 
1986  unsigned ArgNo = 0;
1987  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1988  E = FI.arg_end();
1989  I != E; ++I, ++ArgNo) {
1990  QualType ParamType = I->type;
1991  const ABIArgInfo &AI = I->info;
1992  llvm::AttrBuilder Attrs;
1993 
1994  // Add attribute for padding argument, if necessary.
1995  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1996  if (AI.getPaddingInReg()) {
1997  ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1998  llvm::AttributeSet::get(
1999  getLLVMContext(),
2000  llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2001  }
2002  }
2003 
2004  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2005  // have the corresponding parameter variable. It doesn't make
2006  // sense to do it here because parameters are so messed up.
2007  switch (AI.getKind()) {
2008  case ABIArgInfo::Extend:
2009  if (ParamType->isSignedIntegerOrEnumerationType())
2010  Attrs.addAttribute(llvm::Attribute::SExt);
2011  else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
2012  if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
2013  Attrs.addAttribute(llvm::Attribute::SExt);
2014  else
2015  Attrs.addAttribute(llvm::Attribute::ZExt);
2016  }
2017  // FALL THROUGH
2018  case ABIArgInfo::Direct:
2019  if (ArgNo == 0 && FI.isChainCall())
2020  Attrs.addAttribute(llvm::Attribute::Nest);
2021  else if (AI.getInReg())
2022  Attrs.addAttribute(llvm::Attribute::InReg);
2023  break;
2024 
2025  case ABIArgInfo::Indirect: {
2026  if (AI.getInReg())
2027  Attrs.addAttribute(llvm::Attribute::InReg);
2028 
2029  if (AI.getIndirectByVal())
2030  Attrs.addAttribute(llvm::Attribute::ByVal);
2031 
2032  CharUnits Align = AI.getIndirectAlign();
2033 
2034  // In a byval argument, it is important that the required
2035  // alignment of the type is honored, as LLVM might be creating a
2036  // *new* stack object, and needs to know what alignment to give
2037  // it. (Sometimes it can deduce a sensible alignment on its own,
2038  // but not if clang decides it must emit a packed struct, or the
2039  // user specifies increased alignment requirements.)
2040  //
2041  // This is different from indirect *not* byval, where the object
2042  // exists already, and the align attribute is purely
2043  // informative.
2044  assert(!Align.isZero());
2045 
2046  // For now, only add this when we have a byval argument.
2047  // TODO: be less lazy about updating test cases.
2048  if (AI.getIndirectByVal())
2049  Attrs.addAlignmentAttr(Align.getQuantity());
2050 
2051  // byval disables readnone and readonly.
2052  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2053  .removeAttribute(llvm::Attribute::ReadNone);
2054  break;
2055  }
2056  case ABIArgInfo::Ignore:
2057  case ABIArgInfo::Expand:
2058  case ABIArgInfo::CoerceAndExpand:
2059  break;
2060 
2061  case ABIArgInfo::InAlloca:
2062  // inalloca disables readnone and readonly.
2063  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2064  .removeAttribute(llvm::Attribute::ReadNone);
2065  continue;
2066  }
2067 
2068  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2069  QualType PTy = RefTy->getPointeeType();
2070  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2071  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2072  .getQuantity());
2073  else if (getContext().getTargetAddressSpace(PTy) == 0)
2074  Attrs.addAttribute(llvm::Attribute::NonNull);
2075  }
2076 
2077  switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2078  case ParameterABI::Ordinary:
2079  break;
2080 
2081  case ParameterABI::SwiftIndirectResult: {
2082  // Add 'sret' if we haven't already used it for something, but
2083  // only if the result is void.
2084  if (!hasUsedSRet && RetTy->isVoidType()) {
2085  Attrs.addAttribute(llvm::Attribute::StructRet);
2086  hasUsedSRet = true;
2087  }
2088 
2089  // Add 'noalias' in either case.
2090  Attrs.addAttribute(llvm::Attribute::NoAlias);
2091 
2092  // Add 'dereferenceable' and 'alignment'.
2093  auto PTy = ParamType->getPointeeType();
2094  if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2095  auto info = getContext().getTypeInfoInChars(PTy);
2096  Attrs.addDereferenceableAttr(info.first.getQuantity());
2097  Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2098  info.second.getQuantity()));
2099  }
2100  break;
2101  }
2102 
2103  case ParameterABI::SwiftErrorResult:
2104  Attrs.addAttribute(llvm::Attribute::SwiftError);
2105  break;
2106 
2107  case ParameterABI::SwiftContext:
2108  Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2109  break;
2110  }
2111 
2112  if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2113  Attrs.addAttribute(llvm::Attribute::NoCapture);
2114 
2115  if (Attrs.hasAttributes()) {
2116  unsigned FirstIRArg, NumIRArgs;
2117  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2118  for (unsigned i = 0; i < NumIRArgs; i++)
2119  ArgAttrs[FirstIRArg + i] =
2120  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2121  }
2122  }
2123  assert(ArgNo == FI.arg_size());
2124 
2125  AttrList = llvm::AttributeList::get(
2126  getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2127  llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2128 }
2129 
2130 /// An argument came in as a promoted argument; demote it back to its
2131 /// declared type.
2132 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2133  const VarDecl *var,
2134  llvm::Value *value) {
2135  llvm::Type *varType = CGF.ConvertType(var->getType());
2136 
2137  // This can happen with promotions that actually don't change the
2138  // underlying type, like the enum promotions.
2139  if (value->getType() == varType) return value;
2140 
2141  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2142  && "unexpected promotion type");
2143 
2144  if (isa<llvm::IntegerType>(varType))
2145  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2146 
2147  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2148 }
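
// Illustrative example (K&R C, not part of this file): for the unprototyped
// definition
//
//   float scale(f) float f; { return f * 2.0f; }
//
// callers promote the argument to double, so the prologue receives a double
// and the CreateFPCast above narrows it back to the declared float; integer
// promotions (e.g. char -> int) take the CreateTrunc path instead.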
2149 
2150 /// Returns the attribute (either parameter attribute, or function
2151 /// attribute), which declares argument ArgNo to be non-null.
2152 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2153  QualType ArgType, unsigned ArgNo) {
2154  // FIXME: __attribute__((nonnull)) can also be applied to:
2155  // - references to pointers, where the pointee is known to be
2156  // nonnull (apparently a Clang extension)
2157  // - transparent unions containing pointers
2158  // In the former case, LLVM IR cannot represent the constraint. In
2159  // the latter case, we have no guarantee that the transparent union
2160  // is in fact passed as a pointer.
2161  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2162  return nullptr;
2163  // First, check attribute on parameter itself.
2164  if (PVD) {
2165  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2166  return ParmNNAttr;
2167  }
2168  // Check function attributes.
2169  if (!FD)
2170  return nullptr;
2171  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2172  if (NNAttr->isNonNull(ArgNo))
2173  return NNAttr;
2174  }
2175  return nullptr;
2176 }
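
// Illustrative example (hypothetical declarations, not part of this file):
//
//   void f(int *p) __attribute__((nonnull(1)));   // function-level, base-1
//   void g(__attribute__((nonnull)) int *p);      // on the parameter itself
//
// Both forms are found by this helper: the parameter's own attribute is
// checked first, then the function-level list is scanned via
// isNonNull(ArgNo).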
2177 
2178 namespace {
2179  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2180  Address Temp;
2181  Address Arg;
2182  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2183  void Emit(CodeGenFunction &CGF, Flags flags) override {
2184  llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2185  CGF.Builder.CreateStore(errorValue, Arg);
2186  }
2187  };
2188 }
2189 
2190 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2191  llvm::Function *Fn,
2192  const FunctionArgList &Args) {
2193  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2194  // Naked functions don't have prologues.
2195  return;
2196 
2197  // If this is an implicit-return-zero function, go ahead and
2198  // initialize the return value. TODO: it might be nice to have
2199  // a more general mechanism for this that didn't require synthesized
2200  // return statements.
2201  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2202  if (FD->hasImplicitReturnZero()) {
2203  QualType RetTy = FD->getReturnType().getUnqualifiedType();
2204  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2205  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2206  Builder.CreateStore(Zero, ReturnValue);
2207  }
2208  }
2209 
2210  // FIXME: We no longer need the types from FunctionArgList; lift up and
2211  // simplify.
2212 
2213  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2214  // Flattened function arguments.
2215  SmallVector<llvm::Value *, 16> FnArgs;
2216  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2217  for (auto &Arg : Fn->args()) {
2218  FnArgs.push_back(&Arg);
2219  }
2220  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2221 
2222  // If we're using inalloca, all the memory arguments are GEPs off of the last
2223  // parameter, which is a pointer to the complete memory area.
2224  Address ArgStruct = Address::invalid();
2225  const llvm::StructLayout *ArgStructLayout = nullptr;
2226  if (IRFunctionArgs.hasInallocaArg()) {
2227  ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2228  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2229  FI.getArgStructAlignment());
2230 
2231  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2232  }
2233 
2234  // Name the struct return parameter.
2235  if (IRFunctionArgs.hasSRetArg()) {
2236  auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2237  AI->setName("agg.result");
2238  AI->addAttr(llvm::Attribute::NoAlias);
2239  }
2240 
2241  // Track if we received the parameter as a pointer (indirect, byval, or
2242  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
2243  // copy it into a local alloca for us.
2244  SmallVector<ParamValue, 16> ArgVals;
2245  ArgVals.reserve(Args.size());
2246 
2247  // Create a pointer value for every parameter declaration. This usually
2248  // entails copying one or more LLVM IR arguments into an alloca. Don't push
2249  // any cleanups or do anything that might unwind. We do that separately, so
2250  // we can push the cleanups in the correct order for the ABI.
2251  assert(FI.arg_size() == Args.size() &&
2252  "Mismatch between function signature & arguments.");
2253  unsigned ArgNo = 0;
2254  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2255  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2256  i != e; ++i, ++info_it, ++ArgNo) {
2257  const VarDecl *Arg = *i;
2258  QualType Ty = info_it->type;
2259  const ABIArgInfo &ArgI = info_it->info;
2260 
2261  bool isPromoted =
2262  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2263 
2264  unsigned FirstIRArg, NumIRArgs;
2265  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2266 
2267  switch (ArgI.getKind()) {
2268  case ABIArgInfo::InAlloca: {
2269  assert(NumIRArgs == 0);
2270  auto FieldIndex = ArgI.getInAllocaFieldIndex();
2271  CharUnits FieldOffset =
2272  CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2273  Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2274  Arg->getName());
2275  ArgVals.push_back(ParamValue::forIndirect(V));
2276  break;
2277  }
2278 
2279  case ABIArgInfo::Indirect: {
2280  assert(NumIRArgs == 1);
2281  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2282 
2283  if (!hasScalarEvaluationKind(Ty)) {
2284  // Aggregates and complex variables are accessed by reference. All we
2285  // need to do is realign the value, if requested.
2286  Address V = ParamAddr;
2287  if (ArgI.getIndirectRealign()) {
2288  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2289 
2290  // Copy from the incoming argument pointer to the temporary with the
2291  // appropriate alignment.
2292  //
2293  // FIXME: We should have a common utility for generating an aggregate
2294  // copy.
2295  CharUnits Size = getContext().getTypeSizeInChars(Ty);
2296  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2297  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2298  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2299  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2300  V = AlignedTemp;
2301  }
2302  ArgVals.push_back(ParamValue::forIndirect(V));
2303  } else {
2304  // Load scalar value from indirect argument.
2305  llvm::Value *V =
2306  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2307 
2308  if (isPromoted)
2309  V = emitArgumentDemotion(*this, Arg, V);
2310  ArgVals.push_back(ParamValue::forDirect(V));
2311  }
2312  break;
2313  }
2314 
2315  case ABIArgInfo::Extend:
2316  case ABIArgInfo::Direct: {
2317 
2318  // If we have the trivial case, handle it with no muss and fuss.
2319  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2320  ArgI.getCoerceToType() == ConvertType(Ty) &&
2321  ArgI.getDirectOffset() == 0) {
2322  assert(NumIRArgs == 1);
2323  llvm::Value *V = FnArgs[FirstIRArg];
2324  auto AI = cast<llvm::Argument>(V);
2325 
2326  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2327  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2328  PVD->getFunctionScopeIndex()))
2329  AI->addAttr(llvm::Attribute::NonNull);
2330 
2331  QualType OTy = PVD->getOriginalType();
2332  if (const auto *ArrTy =
2333  getContext().getAsConstantArrayType(OTy)) {
2334  // A C99 array parameter declaration with the static keyword also
2335  // indicates dereferenceability, and if the size is constant we can
2336  // use the dereferenceable attribute (which requires the size in
2337  // bytes).
2338  if (ArrTy->getSizeModifier() == ArrayType::Static) {
2339  QualType ETy = ArrTy->getElementType();
2340  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2341  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2342  ArrSize) {
2343  llvm::AttrBuilder Attrs;
2344  Attrs.addDereferenceableAttr(
2345  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2346  AI->addAttrs(Attrs);
2347  } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2348  AI->addAttr(llvm::Attribute::NonNull);
2349  }
2350  }
2351  } else if (const auto *ArrTy =
2352  getContext().getAsVariableArrayType(OTy)) {
2353  // For C99 VLAs with the static keyword, we don't know the size so
2354  // we can't use the dereferenceable attribute, but in addrspace(0)
2355  // we know that it must be nonnull.
2356  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2357  !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2358  AI->addAttr(llvm::Attribute::NonNull);
2359  }
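
// Illustrative example (hypothetical prototype, not part of this file):
//
//   void sum4(int v[static 4]);
//
// hits the constant-array branch above and marks 'v' dereferenceable(16) on
// a target with 4-byte int, while a VLA form like "int v[static n]" only
// earns nonnull (and only in address space 0).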
2360 
2361  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2362  if (!AVAttr)
2363  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2364  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2365  if (AVAttr) {
2366  llvm::Value *AlignmentValue =
2367  EmitScalarExpr(AVAttr->getAlignment());
2368  llvm::ConstantInt *AlignmentCI =
2369  cast<llvm::ConstantInt>(AlignmentValue);
2370  unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2371  +llvm::Value::MaximumAlignment);
2372  AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2373  }
2374  }
2375 
2376  if (Arg->getType().isRestrictQualified())
2377  AI->addAttr(llvm::Attribute::NoAlias);
2378 
2379  // LLVM expects swifterror parameters to be used in very restricted
2380  // ways. Copy the value into a less-restricted temporary.
2381  if (FI.getExtParameterInfo(ArgNo).getABI()
2382  == ParameterABI::SwiftErrorResult) {
2383  QualType pointeeTy = Ty->getPointeeType();
2384  assert(pointeeTy->isPointerType());
2385  Address temp =
2386  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2387  Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2388  llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2389  Builder.CreateStore(incomingErrorValue, temp);
2390  V = temp.getPointer();
2391 
2392  // Push a cleanup to copy the value back at the end of the function.
2393  // The convention does not guarantee that the value will be written
2394  // back if the function exits with an unwind exception.
2395  EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2396  }
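
// Illustrative sketch (IR shape, not part of this file): a swifterror
// parameter arrives roughly as
//
//   define void @f(i8** swifterror %err)
//
// and LLVM only permits such a value to feed loads, stores, and other
// swifterror arguments, which is why the block above shadows it with an
// ordinary temporary and lets CopyBackSwiftError restore it on exit.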
2397 
2398  // Ensure the argument is the correct type.
2399  if (V->getType() != ArgI.getCoerceToType())
2400  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2401 
2402  if (isPromoted)
2403  V = emitArgumentDemotion(*this, Arg, V);
2404 
2405  // Because of merging of function types from multiple decls it is
2406  // possible for the type of an argument to not match the corresponding
2407  // type in the function type. Since we are codegening the callee
2408  // in here, add a cast to the argument type.
2409  llvm::Type *LTy = ConvertType(Arg->getType());
2410  if (V->getType() != LTy)
2411  V = Builder.CreateBitCast(V, LTy);
2412 
2413  ArgVals.push_back(ParamValue::forDirect(V));
2414  break;
2415  }
2416 
2417  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2418  Arg->getName());
2419 
2420  // Pointer to store into.
2421  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2422 
2423  // Fast-isel and the optimizer generally like scalar values better than
2424  // FCAs, so we flatten them if this is safe to do for this argument.
2425  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2426  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2427  STy->getNumElements() > 1) {
2428  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2429  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2430  llvm::Type *DstTy = Ptr.getElementType();
2431  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2432 
2433  Address AddrToStoreInto = Address::invalid();
2434  if (SrcSize <= DstSize) {
2435  AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2436  } else {
2437  AddrToStoreInto =
2438  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2439  }
2440 
2441  assert(STy->getNumElements() == NumIRArgs);
2442  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2443  auto AI = FnArgs[FirstIRArg + i];
2444  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2445  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2446  Address EltPtr =
2447  Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2448  Builder.CreateStore(AI, EltPtr);
2449  }
2450 
2451  if (SrcSize > DstSize) {
2452  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2453  }
2454 
2455  } else {
2456  // Simple case, just do a coerced store of the argument into the alloca.
2457  assert(NumIRArgs == 1);
2458  auto AI = FnArgs[FirstIRArg];
2459  AI->setName(Arg->getName() + ".coerce");
2460  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2461  }
2462 
2463  // Match to what EmitParmDecl is expecting for this type.
2464  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2465  llvm::Value *V =
2466  EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2467  if (isPromoted)
2468  V = emitArgumentDemotion(*this, Arg, V);
2469  ArgVals.push_back(ParamValue::forDirect(V));
2470  } else {
2471  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2472  }
2473  break;
2474  }
2475 
2476  case ABIArgInfo::CoerceAndExpand: {
2477  // Reconstruct into a temporary.
2478  Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2479  ArgVals.push_back(ParamValue::forIndirect(alloca));
2480 
2481  auto coercionType = ArgI.getCoerceAndExpandType();
2482  alloca = Builder.CreateElementBitCast(alloca, coercionType);
2483  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2484 
2485  unsigned argIndex = FirstIRArg;
2486  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2487  llvm::Type *eltType = coercionType->getElementType(i);
2488  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2489  continue;
2490 
2491  auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2492  auto elt = FnArgs[argIndex++];
2493  Builder.CreateStore(elt, eltAddr);
2494  }
2495  assert(argIndex == FirstIRArg + NumIRArgs);
2496  break;
2497  }
2498 
2499  case ABIArgInfo::Expand: {
2500  // If this structure was expanded into multiple arguments then
2501  // we need to create a temporary and reconstruct it from the
2502  // arguments.
2503  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2504  LValue LV = MakeAddrLValue(Alloca, Ty);
2505  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2506 
2507  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2508  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2509  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2510  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2511  auto AI = FnArgs[FirstIRArg + i];
2512  AI->setName(Arg->getName() + "." + Twine(i));
2513  }
2514  break;
2515  }
2516 
2517  case ABIArgInfo::Ignore:
2518  assert(NumIRArgs == 0);
2519  // Initialize the local variable appropriately.
2520  if (!hasScalarEvaluationKind(Ty)) {
2521  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2522  } else {
2523  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2524  ArgVals.push_back(ParamValue::forDirect(U));
2525  }
2526  break;
2527  }
2528  }
2529 
2530  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2531  for (int I = Args.size() - 1; I >= 0; --I)
2532  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2533  } else {
2534  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2535  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2536  }
2537 }
2538 
2539 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2540  while (insn->use_empty()) {
2541  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2542  if (!bitcast) return;
2543 
2544  // This is "safe" because we would have used a ConstantExpr otherwise.
2545  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2546  bitcast->eraseFromParent();
2547  }
2548 }
2549 
2550 /// Try to emit a fused autorelease of a return result.
2551 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2552  llvm::Value *result) {
2553  // We must be immediately followed by the cast.
2554  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2555  if (BB->empty()) return nullptr;
2556  if (&BB->back() != result) return nullptr;
2557 
2558  llvm::Type *resultType = result->getType();
2559 
2560  // result is in a BasicBlock and is therefore an Instruction.
2561  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2562 
2563  SmallVector<llvm::Instruction *, 4> InstsToKill;
2564 
2565  // Look for:
2566  // %generator = bitcast %type1* %generator2 to %type2*
2567  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2568  // We would have emitted this as a constant if the operand weren't
2569  // an Instruction.
2570  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2571 
2572  // Require the generator to be immediately followed by the cast.
2573  if (generator->getNextNode() != bitcast)
2574  return nullptr;
2575 
2576  InstsToKill.push_back(bitcast);
2577  }
2578 
2579  // Look for:
2580  // %generator = call i8* @objc_retain(i8* %originalResult)
2581  // or
2582  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2583  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2584  if (!call) return nullptr;
2585 
2586  bool doRetainAutorelease;
2587 
2588  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2589  doRetainAutorelease = true;
2590  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2591  .objc_retainAutoreleasedReturnValue) {
2592  doRetainAutorelease = false;
2593 
2594  // If we emitted an assembly marker for this call (and the
2595  // ARCEntrypoints field should have been set if so), go looking
2596  // for that call. If we can't find it, we can't do this
2597  // optimization. But it should always be the immediately previous
2598  // instruction, unless we needed bitcasts around the call.
2599  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2600  llvm::Instruction *prev = call->getPrevNode();
2601  assert(prev);
2602  if (isa<llvm::BitCastInst>(prev)) {
2603  prev = prev->getPrevNode();
2604  assert(prev);
2605  }
2606  assert(isa<llvm::CallInst>(prev));
2607  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2608  CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue);
2609  InstsToKill.push_back(prev);
2610  }
2611  } else {
2612  return nullptr;
2613  }
2614 
2615  result = call->getArgOperand(0);
2616  InstsToKill.push_back(call);
2617 
2618  // Keep killing bitcasts, for sanity. Note that we no longer care
2619  // about precise ordering as long as there's exactly one use.
2620  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2621  if (!bitcast->hasOneUse()) break;
2622  InstsToKill.push_back(bitcast);
2623  result = bitcast->getOperand(0);
2624  }
2625 
2626  // Delete all the unnecessary instructions, from latest to earliest.
2627  for (auto *I : InstsToKill)
2628  I->eraseFromParent();
2629 
2630  // Do the fused retain/autorelease if we were asked to.
2631  if (doRetainAutorelease)
2632  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2633 
2634  // Cast back to the result type.
2635  return CGF.Builder.CreateBitCast(result, resultType);
2636 }
2637 
2638 /// If this is a +1 of the value of an immutable 'self', remove it.
2639 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2640  llvm::Value *result) {
2641  // This is only applicable to a method with an immutable 'self'.
2642  const ObjCMethodDecl *method =
2643  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2644  if (!method) return nullptr;
2645  const VarDecl *self = method->getSelfDecl();
2646  if (!self->getType().isConstQualified()) return nullptr;
2647 
2648  // Look for a retain call.
2649  llvm::CallInst *retainCall =
2650  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2651  if (!retainCall ||
2652  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2653  return nullptr;
2654 
2655  // Look for an ordinary load of 'self'.
2656  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2657  llvm::LoadInst *load =
2658  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2659  if (!load || load->isAtomic() || load->isVolatile() ||
2660  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2661  return nullptr;
2662 
2663  // Okay! Burn it all down. This relies for correctness on the
2664  // assumption that the retain is emitted as part of the return and
2665  // that thereafter everything is used "linearly".
2666  llvm::Type *resultType = result->getType();
2667  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2668  assert(retainCall->use_empty());
2669  retainCall->eraseFromParent();
2670  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2671 
2672  return CGF.Builder.CreateBitCast(load, resultType);
2673 }
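
// Illustrative example (hypothetical Objective-C, not part of this file):
// under ARC, a method body like
//
//   - (instancetype)me { return self; }
//
// would otherwise emit objc_retain(self) as part of the return; because
// 'self' is immutable in an ordinary method, the helper above deletes the
// retain/load pair and returns the loaded 'self' directly.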
2674 
2675 /// Emit an ARC autorelease of the result of a function.
2676 ///
2677 /// \return the value to actually return from the function
2678 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2679  llvm::Value *result) {
2680  // If we're returning 'self', kill the initial retain. This is a
2681  // heuristic attempt to "encourage correctness" in the really unfortunate
2682  // case where we have a return of self during a dealloc and we desperately
2683  // need to avoid the possible autorelease.
2684  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2685  return self;
2686 
2687  // At -O0, try to emit a fused retain/autorelease.
2688  if (CGF.shouldUseFusedARCCalls())
2689  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2690  return fused;
2691 
2692  return CGF.EmitARCAutoreleaseReturnValue(result);
2693 }
2694 
2695 /// Heuristically search for a dominating store to the return-value slot.
2696 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2697  // Check if a User is a store which pointerOperand is the ReturnValue.
2698  // We are looking for stores to the ReturnValue, not for stores of the
2699  // ReturnValue to some other location.
2700  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2701  auto *SI = dyn_cast<llvm::StoreInst>(U);
2702  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2703  return nullptr;
2704  // These aren't actually possible for non-coerced returns, and we
2705  // only care about non-coerced returns on this code path.
2706  assert(!SI->isAtomic() && !SI->isVolatile());
2707  return SI;
2708  };
2709  // If there are multiple uses of the return-value slot, just check
2710  // for something immediately preceding the IP. Sometimes this can
2711  // happen with how we generate implicit-returns; it can also happen
2712  // with noreturn cleanups.
2713  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2714  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2715  if (IP->empty()) return nullptr;
2716  llvm::Instruction *I = &IP->back();
2717 
2718  // Skip lifetime markers
2719  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2720  IE = IP->rend();
2721  II != IE; ++II) {
2722  if (llvm::IntrinsicInst *Intrinsic =
2723  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2724  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2725  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2726  ++II;
2727  if (II == IE)
2728  break;
2729  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2730  continue;
2731  }
2732  }
2733  I = &*II;
2734  break;
2735  }
2736 
2737  return GetStoreIfValid(I);
2738  }
2739 
2740  llvm::StoreInst *store =
2741  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2742  if (!store) return nullptr;
2743 
2744  // Now do a quick-and-dirty dominance check: just walk up the
2745  // single-predecessor chain from the current insertion point.
2746  llvm::BasicBlock *StoreBB = store->getParent();
2747  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2748  while (IP != StoreBB) {
2749  if (!(IP = IP->getSinglePredecessor()))
2750  return nullptr;
2751  }
2752 
2753  // Okay, the store's basic block dominates the insertion point; we
2754  // can do our thing.
2755  return store;
2756 }
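
// Illustrative IR pattern (sketch, not part of this file): the common shape
//
//   %retval = alloca i32
//   store i32 %x, i32* %retval
//   ; ... single-predecessor chain down to the insertion point ...
//
// is what this heuristic accepts, letting the epilogue below return %x
// directly, erase the store, and usually erase the alloca as well.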
2757 
2758 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2759  bool EmitRetDbgLoc,
2760  SourceLocation EndLoc) {
2761  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2762  // Naked functions don't have epilogues.
2763  Builder.CreateUnreachable();
2764  return;
2765  }
2766 
2767  // Functions with no result always return void.
2768  if (!ReturnValue.isValid()) {
2769  Builder.CreateRetVoid();
2770  return;
2771  }
2772 
2773  llvm::DebugLoc RetDbgLoc;
2774  llvm::Value *RV = nullptr;
2775  QualType RetTy = FI.getReturnType();
2776  const ABIArgInfo &RetAI = FI.getReturnInfo();
2777 
2778  switch (RetAI.getKind()) {
2779  case ABIArgInfo::InAlloca:
2780  // Aggregates get evaluated directly into the destination. Sometimes we
2781  // need to return the sret value in a register, though.
2782  assert(hasAggregateEvaluationKind(RetTy));
2783  if (RetAI.getInAllocaSRet()) {
2784  llvm::Function::arg_iterator EI = CurFn->arg_end();
2785  --EI;
2786  llvm::Value *ArgStruct = &*EI;
2787  llvm::Value *SRet = Builder.CreateStructGEP(
2788  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2789  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2790  }
2791  break;
2792 
2793  case ABIArgInfo::Indirect: {
2794  auto AI = CurFn->arg_begin();
2795  if (RetAI.isSRetAfterThis())
2796  ++AI;
2797  switch (getEvaluationKind(RetTy)) {
2798  case TEK_Complex: {
2799  ComplexPairTy RT =
2800  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2801  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2802  /*isInit*/ true);
2803  break;
2804  }
2805  case TEK_Aggregate:
2806  // Do nothing; aggregates get evaluated directly into the destination.
2807  break;
2808  case TEK_Scalar:
2809  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2810  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2811  /*isInit*/ true);
2812  break;
2813  }
2814  break;
2815  }
2816 
2817  case ABIArgInfo::Extend:
2818  case ABIArgInfo::Direct:
2819  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2820  RetAI.getDirectOffset() == 0) {
2821  // The internal return value temp always will have pointer-to-return-type
2822  // type, just do a load.
2823 
2824  // If there is a dominating store to ReturnValue, we can elide
2825  // the load, zap the store, and usually zap the alloca.
2826  if (llvm::StoreInst *SI =
2827  findDominatingStoreToReturnValue(*this)) {
2828  // Reuse the debug location from the store unless there is
2829  // cleanup code to be emitted between the store and return
2830  // instruction.
2831  if (EmitRetDbgLoc && !AutoreleaseResult)
2832  RetDbgLoc = SI->getDebugLoc();
2833  // Get the stored value and nuke the now-dead store.
2834  RV = SI->getValueOperand();
2835  SI->eraseFromParent();
2836 
2837  // If that was the only use of the return value, nuke it as well now.
2838  auto returnValueInst = ReturnValue.getPointer();
2839  if (returnValueInst->use_empty()) {
2840  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2841  alloca->eraseFromParent();
2842  ReturnValue = Address::invalid();
2843  }
2844  }
2845 
2846  // Otherwise, we have to do a simple load.
2847  } else {
2848  RV = Builder.CreateLoad(ReturnValue);
2849  }
2850  } else {
2851  // If the value is offset in memory, apply the offset now.
2852  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2853 
2854  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2855  }
2856 
2857  // In ARC, end functions that return a retainable type with a call
2858  // to objc_autoreleaseReturnValue.
2859  if (AutoreleaseResult) {
2860 #ifndef NDEBUG
2861  // Type::isObjCRetainableType has to be called on a QualType that hasn't
2862  // been stripped of the typedefs, so we cannot use RetTy here. Get the
2863  // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2864  // CurCodeDecl or BlockInfo.
2865  QualType RT;
2866 
2867  if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2868  RT = FD->getReturnType();
2869  else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2870  RT = MD->getReturnType();
2871  else if (isa<BlockDecl>(CurCodeDecl))
2872  RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2873  else
2874  llvm_unreachable("Unexpected function/method type");
2875 
2876  assert(getLangOpts().ObjCAutoRefCount &&
2877  !FI.isReturnsRetained() &&
2878  RT->isObjCRetainableType());
2879 #endif
2880  RV = emitAutoreleaseOfResult(*this, RV);
2881  }
2882 
2883  break;
2884 
2885  case ABIArgInfo::Ignore:
2886  break;
2887 
2888  case ABIArgInfo::CoerceAndExpand: {
2889  auto coercionType = RetAI.getCoerceAndExpandType();
2890  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2891 
2892  // Load all of the coerced elements out into results.
2893  llvm::SmallVector<llvm::Value*, 4> results;
2894  Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2895  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2896  auto coercedEltType = coercionType->getElementType(i);
2897  if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2898  continue;
2899 
2900  auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2901  auto elt = Builder.CreateLoad(eltAddr);
2902  results.push_back(elt);
2903  }
2904 
2905  // If we have one result, it's the single direct result type.
2906  if (results.size() == 1) {
2907  RV = results[0];
2908 
2909  // Otherwise, we need to make a first-class aggregate.
2910  } else {
2911  // Construct a return type that lacks padding elements.
2912  llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2913 
2914  RV = llvm::UndefValue::get(returnType);
2915  for (unsigned i = 0, e = results.size(); i != e; ++i) {
2916  RV = Builder.CreateInsertValue(RV, results[i], i);
2917  }
2918  }
2919  break;
2920  }
2921 
2922  case ABIArgInfo::Expand:
2923  llvm_unreachable("Invalid ABI kind for return argument");
2924  }
2925 
2926  llvm::Instruction *Ret;
2927  if (RV) {
2928  EmitReturnValueCheck(RV);
2929  Ret = Builder.CreateRet(RV);
2930  } else {
2931  Ret = Builder.CreateRetVoid();
2932  }
2933 
2934  if (RetDbgLoc)
2935  Ret->setDebugLoc(std::move(RetDbgLoc));
2936 }
2937 
2938 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2939  // A current decl may not be available when emitting vtable thunks.
2940  if (!CurCodeDecl)
2941  return;
2942 
2943  ReturnsNonNullAttr *RetNNAttr = nullptr;
2944  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2945  RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2946 
2947  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2948  return;
2949 
2950  // Prefer the returns_nonnull attribute if it's present.
2951  SourceLocation AttrLoc;
2952  SanitizerMask CheckKind;
2953  SanitizerHandler Handler;
2954  if (RetNNAttr) {
2955  assert(!requiresReturnValueNullabilityCheck() &&
2956  "Cannot check nullability and the nonnull attribute");
2957  AttrLoc = RetNNAttr->getLocation();
2958  CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2959  Handler = SanitizerHandler::NonnullReturn;
2960  } else {
2961  if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
2962  if (auto *TSI = DD->getTypeSourceInfo())
2963  if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
2964  AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
2965  CheckKind = SanitizerKind::NullabilityReturn;
2966  Handler = SanitizerHandler::NullabilityReturn;
2967  }
2968 
2969  SanitizerScope SanScope(this);
2970 
2971  // Make sure the "return" source location is valid. If we're checking a
2972  // nullability annotation, make sure the preconditions for the check are met.
2973  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
2974  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
2975  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
2976  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
2977  if (requiresReturnValueNullabilityCheck())
2978  CanNullCheck =
2979  Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
2980  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
2981  EmitBlock(Check);
2982 
2983  // Now do the null check.
2984  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
2985  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
2986  llvm::Value *DynamicData[] = {SLocPtr};
2987  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
2988 
2989  EmitBlock(NoCheck);
2990 
2991 #ifndef NDEBUG
2992  // The return location should not be used after the check has been emitted.
2993  ReturnLocation = Address::invalid();
2994 #endif
2995 }
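
// Illustrative example (hypothetical declaration, not part of this file):
// with -fsanitize=nullability-return, a function such as
//
//   int * _Nonnull head(void);
//
// gets the check above: the cached return source location is loaded, the
// precondition is tested, and a null result traps through the
// NullabilityReturn handler.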
2996 
2997 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2998  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2999  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3000 }
3001 
3002 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3003  QualType Ty) {
3004  // FIXME: Generate IR in one pass, rather than going back and fixing up these
3005  // placeholders.
3006  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3007  llvm::Type *IRPtrTy = IRTy->getPointerTo();
3008  llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3009 
3010  // FIXME: When we generate this IR in one pass, we shouldn't need
3011  // this win32-specific alignment hack.
3012  CharUnits Align = CharUnits::fromQuantity(4);
3013  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3014 
3015  return AggValueSlot::forAddr(Address(Placeholder, Align),
3016  Ty.getQualifiers(),
3017  AggValueSlot::IsNotDestructed,
3018  AggValueSlot::DoesNotNeedGCBarriers,
3019  AggValueSlot::IsNotAliased);
3020 }
3021 
3022 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3023  const VarDecl *param,
3024  SourceLocation loc) {
3025  // StartFunction converted the ABI-lowered parameter(s) into a
3026  // local alloca. We need to turn that into an r-value suitable
3027  // for EmitCall.
3028  Address local = GetAddrOfLocalVar(param);
3029 
3030  QualType type = param->getType();
3031 
3032  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3033  "cannot emit delegate call arguments for inalloca arguments!");
3034 
3035  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3036  // but the argument needs to be the original pointer.
3037  if (type->isReferenceType()) {
3038  args.add(RValue::get(Builder.CreateLoad(local)), type);
3039 
3040  // In ARC, move out of consumed arguments so that the release cleanup
3041  // entered by StartFunction doesn't cause an over-release. This isn't
3042  // optimal -O0 code generation, but it should get cleaned up when
3043  // optimization is enabled. This also assumes that delegate calls are
3044  // performed exactly once for a set of arguments, but that should be safe.
3045  } else if (getLangOpts().ObjCAutoRefCount &&
3046  param->hasAttr<NSConsumedAttr>() &&
3047  type->isObjCRetainableType()) {
3048  llvm::Value *ptr = Builder.CreateLoad(local);
3049  auto null =
3050  llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3051  Builder.CreateStore(null, local);
3052  args.add(RValue::get(ptr), type);
3053 
3054  // For the most part, we just need to load the alloca, except that
3055  // aggregate r-values are actually pointers to temporaries.
3056  } else {
3057  args.add(convertTempToRValue(local, type, loc), type);
3058  }
3059 }
3060 
3061 static bool isProvablyNull(llvm::Value *addr) {
3062  return isa<llvm::ConstantPointerNull>(addr);
3063 }
3064 
3065 /// Emit the actual writing-back of a writeback.
3066 static void emitWriteback(CodeGenFunction &CGF,
3067  const CallArgList::Writeback &writeback) {
3068  const LValue &srcLV = writeback.Source;
3069  Address srcAddr = srcLV.getAddress();
3070  assert(!isProvablyNull(srcAddr.getPointer()) &&
3071  "shouldn't have writeback for provably null argument");
3072 
3073  llvm::BasicBlock *contBB = nullptr;
3074 
3075  // If the argument wasn't provably non-null, we need to null check
3076  // before doing the store.
3077  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3078  CGF.CGM.getDataLayout());
3079  if (!provablyNonNull) {
3080  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3081  contBB = CGF.createBasicBlock("icr.done");
3082 
3083  llvm::Value *isNull =
3084  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3085  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3086  CGF.EmitBlock(writebackBB);
3087  }
3088 
3089  // Load the value to writeback.
3090  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3091 
3092  // Cast it back, in case we're writing an id to a Foo* or something.
3093  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3094  "icr.writeback-cast");
3095 
3096  // Perform the writeback.
3097 
3098  // If we have a "to use" value, it's something we need to emit a use
3099  // of. This has to be carefully threaded in: if it's done after the
3100  // release it's potentially undefined behavior (and the optimizer
3101  // will ignore it), and if it happens before the retain then the
3102  // optimizer could move the release there.
3103  if (writeback.ToUse) {
3104  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3105 
3106  // Retain the new value. No need to block-copy here: the block's
3107  // being passed up the stack.
3108  value = CGF.EmitARCRetainNonBlock(value);
3109 
3110  // Emit the intrinsic use here.
3111  CGF.EmitARCIntrinsicUse(writeback.ToUse);
3112 
3113  // Load the old value (primitively).
3114  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3115 
3116  // Put the new value in place (primitively).
3117  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3118 
3119  // Release the old value.
3120  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3121 
3122  // Otherwise, we can just do a normal lvalue store.
3123  } else {
3124  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3125  }
3126 
3127  // Jump to the continuation block.
3128  if (!provablyNonNull)
3129  CGF.EmitBlock(contBB);
3130 }
3131 
3132 static void emitWritebacks(CodeGenFunction &CGF,
3133  const CallArgList &args) {
3134  for (const auto &I : args.writebacks())
3135  emitWriteback(CGF, I);
3136 }
3137 
3138 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3139  const CallArgList &CallArgs) {
3140  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3142  CallArgs.getCleanupsToDeactivate();
3143  // Iterate in reverse to increase the likelihood of popping the cleanup.
3144  for (const auto &I : llvm::reverse(Cleanups)) {
3145  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3146  I.IsActiveIP->eraseFromParent();
3147  }
3148 }
3149 
3150 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3151  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3152  if (uop->getOpcode() == UO_AddrOf)
3153  return uop->getSubExpr();
3154  return nullptr;
3155 }
3156 
3157 /// Emit an argument that's being passed call-by-writeback. That is,
3158 /// we are passing the address of an __autoreleased temporary; it
3159 /// might be copy-initialized with the current value of the given
3160 /// address, but it will definitely be copied out of after the call.
3161 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3162  const ObjCIndirectCopyRestoreExpr *CRE) {
3163  LValue srcLV;
3164 
3165  // Make an optimistic effort to emit the address as an l-value.
3166  // This can fail if the argument expression is more complicated.
3167  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3168  srcLV = CGF.EmitLValue(lvExpr);
3169 
3170  // Otherwise, just emit it as a scalar.
3171  } else {
3172  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3173 
3174  QualType srcAddrType =
3175  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3176  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3177  }
3178  Address srcAddr = srcLV.getAddress();
3179 
3180  // The dest and src types don't necessarily match in LLVM terms
3181  // because of the crazy ObjC compatibility rules.
3182 
3183  llvm::PointerType *destType =
3184  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3185 
3186  // If the address is a constant null, just pass the appropriate null.
3187  if (isProvablyNull(srcAddr.getPointer())) {
3188  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3189  CRE->getType());
3190  return;
3191  }
3192 
3193  // Create the temporary.
3194  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3195  CGF.getPointerAlign(),
3196  "icr.temp");
3197  // Loading an l-value can introduce a cleanup if the l-value is __weak,
3198  // and that cleanup will be conditional if we can't prove that the l-value
3199  // isn't null, so we need to register a dominating point so that the cleanups
3200  // system will make valid IR.
3201  CodeGenFunction::ConditionalEvaluation condEval(CGF);
3202 
3203  // Zero-initialize it if we're not doing a copy-initialization.
3204  bool shouldCopy = CRE->shouldCopy();
3205  if (!shouldCopy) {
3206  llvm::Value *null =
3207  llvm::ConstantPointerNull::get(
3208  cast<llvm::PointerType>(destType->getElementType()));
3209  CGF.Builder.CreateStore(null, temp);
3210  }
3211 
3212  llvm::BasicBlock *contBB = nullptr;
3213  llvm::BasicBlock *originBB = nullptr;
3214 
3215  // If the address is *not* known to be non-null, we need to switch.
3216  llvm::Value *finalArgument;
3217 
3218  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3219  CGF.CGM.getDataLayout());
3220  if (provablyNonNull) {
3221  finalArgument = temp.getPointer();
3222  } else {
3223  llvm::Value *isNull =
3224  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3225 
3226  finalArgument = CGF.Builder.CreateSelect(isNull,
3227  llvm::ConstantPointerNull::get(destType),
3228  temp.getPointer(), "icr.argument");
3229 
3230  // If we need to copy, then the load has to be conditional, which
3231  // means we need control flow.
3232  if (shouldCopy) {
3233  originBB = CGF.Builder.GetInsertBlock();
3234  contBB = CGF.createBasicBlock("icr.cont");
3235  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3236  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3237  CGF.EmitBlock(copyBB);
3238  condEval.begin(CGF);
3239  }
3240  }
3241 
3242  llvm::Value *valueToUse = nullptr;
3243 
3244  // Perform a copy if necessary.
3245  if (shouldCopy) {
3246  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3247  assert(srcRV.isScalar());
3248 
3249  llvm::Value *src = srcRV.getScalarVal();
3250  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3251  "icr.cast");
3252 
3253  // Use an ordinary store, not a store-to-lvalue.
3254  CGF.Builder.CreateStore(src, temp);
3255 
3256  // If optimization is enabled, and the value was held in a
3257  // __strong variable, we need to tell the optimizer that this
3258  // value has to stay alive until we're doing the store back.
3259  // This is because the temporary is effectively unretained,
3260  // and so otherwise we can violate the high-level semantics.
3261  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3262  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3263  valueToUse = src;
3264  }
3265  }
3266 
3267  // Finish the control flow if we needed it.
3268  if (shouldCopy && !provablyNonNull) {
3269  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3270  CGF.EmitBlock(contBB);
3271 
3272  // Make a phi for the value to intrinsically use.
3273  if (valueToUse) {
3274  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3275  "icr.to-use");
3276  phiToUse->addIncoming(valueToUse, copyBB);
3277  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3278  originBB);
3279  valueToUse = phiToUse;
3280  }
3281 
3282  condEval.end(CGF);
3283  }
3284 
3285  args.addWriteback(srcLV, temp, valueToUse);
3286  args.add(RValue::get(finalArgument), CRE->getType());
3287 }
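// Illustrative sketch, not part of CGCall.cpp: emitWritebackArg and
// emitWriteback together implement ARC's indirect copy-restore for
// out-parameters whose ownership qualification differs from the callee's.
// The declarations below are hypothetical:
//
//   void getError(NSError *__autoreleasing *outError);
//   void caller(NSError *__weak *err) {
//     getError(err);  // *err is copied into the __autoreleasing "icr.temp"
//   }                 // before the call; afterwards the temporary's value
//                     // is stored back through err. Both steps are skipped
//                     // along the icr.isnull path when err itself is null.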
3288 
3289 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3290  assert(!StackBase);
3291 
3292  // Save the stack.
3293  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3294  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3295 }
3296 
3297 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3298  if (StackBase) {
3299  // Restore the stack after the call.
3300  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3301  CGF.Builder.CreateCall(F, StackBase);
3302  }
3303 }
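// Illustrative sketch, not part of CGCall.cpp: these two helpers bracket the
// 32-bit Windows inalloca convention, where by-value objects with non-trivial
// copy semantics live in caller-allocated argument memory. Hypothetical code:
//
//   struct S { S(const S &); int x; };
//   void callee(S s);
//   void caller(S &s) { callee(s); }  // emits llvm.stacksave, an "argmem"
//                                     // alloca marked inalloca, the call,
//                                     // then llvm.stackrestore afterwards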
3304 
3305 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3306  SourceLocation ArgLoc,
3307  AbstractCallee AC,
3308  unsigned ParmNum) {
3309  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3310  SanOpts.has(SanitizerKind::NullabilityArg)))
3311  return;
3312 
3313  // The param decl may be missing in a variadic function.
3314  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3315  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3316 
3317  // Prefer the nonnull attribute if it's present.
3318  const NonNullAttr *NNAttr = nullptr;
3319  if (SanOpts.has(SanitizerKind::NonnullAttribute))
3320  NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3321 
3322  bool CanCheckNullability = false;
3323  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3324  auto Nullability = PVD->getType()->getNullability(getContext());
3325  CanCheckNullability = Nullability &&
3326  *Nullability == NullabilityKind::NonNull &&
3327  PVD->getTypeSourceInfo();
3328  }
3329 
3330  if (!NNAttr && !CanCheckNullability)
3331  return;
3332 
3333  SourceLocation AttrLoc;
3334  SanitizerMask CheckKind;
3335  SanitizerHandler Handler;
3336  if (NNAttr) {
3337  AttrLoc = NNAttr->getLocation();
3338  CheckKind = SanitizerKind::NonnullAttribute;
3339  Handler = SanitizerHandler::NonnullArg;
3340  } else {
3341  AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3342  CheckKind = SanitizerKind::NullabilityArg;
3343  Handler = SanitizerHandler::NullabilityArg;
3344  }
3345 
3346  SanitizerScope SanScope(this);
3347  assert(RV.isScalar());
3348  llvm::Value *V = RV.getScalarVal();
3349  llvm::Value *Cond =
3350  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3351  llvm::Constant *StaticData[] = {
3352  EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3353  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3354  };
3355  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3356 }
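// Illustrative sketch, not part of CGCall.cpp: when building with
// -fsanitize=nonnull-attribute or -fsanitize=nullability-arg, the check
// above guards call sites like this hypothetical one:
//
//   void use(void *p) __attribute__((nonnull(1)));
//   void call(void *maybeNull) { use(maybeNull); }  // icmp ne against null,
//                                                   // then a branch to the
//                                                   // NonnullArg handler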
3357 
3358 void CodeGenFunction::EmitCallArgs(
3359  CallArgList &Args, ArrayRef<QualType> ArgTypes,
3360  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3361  AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3362  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3363 
3364  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3365  // because arguments are destroyed left to right in the callee. As a special
3366  // case, there are certain language constructs that require left-to-right
3367  // evaluation, and in those cases we consider the evaluation order requirement
3368  // to trump the "destruction order is reverse construction order" guarantee.
3369  bool LeftToRight =
3370  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3371  ? Order == EvaluationOrder::ForceLeftToRight
3372  : Order != EvaluationOrder::ForceRightToLeft;
3373 
3374  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3375  RValue EmittedArg) {
3376  if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3377  return;
3378  auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3379  if (PS == nullptr)
3380  return;
3381 
3382  const auto &Context = getContext();
3383  auto SizeTy = Context.getSizeType();
3384  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3385  assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3386  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3387  EmittedArg.getScalarVal());
3388  Args.add(RValue::get(V), SizeTy);
3389  // If we're emitting args in reverse, be sure to do so with
3390  // pass_object_size, as well.
3391  if (!LeftToRight)
3392  std::swap(Args.back(), *(&Args.back() - 1));
3393  };
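// Illustrative sketch, not part of CGCall.cpp: the lambda above appends the
// implicit size argument required by pass_object_size parameters. The
// function below is hypothetical:
//
//   void fill(char *const buf __attribute__((pass_object_size(0))));
//   char arr[16];
//   fill(arr);  // lowers as if fill(arr, __builtin_object_size(arr, 0)),
//               // with the extra size_t emitted right after its pointer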
3394 
3395  // Insert a stack save if we're going to need any inalloca args.
3396  bool HasInAllocaArgs = false;
3397  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3398  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3399  I != E && !HasInAllocaArgs; ++I)
3400  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3401  if (HasInAllocaArgs) {
3402  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3403  Args.allocateArgumentMemory(*this);
3404  }
3405  }
3406 
3407  // Evaluate each argument in the appropriate order.
3408  size_t CallArgsStart = Args.size();
3409  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3410  unsigned Idx = LeftToRight ? I : E - I - 1;
3411  CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3412  unsigned InitialArgSize = Args.size();
3413  // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3414  // the argument and parameter match or the objc method is parameterized.
3415  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3416  getContext().hasSameUnqualifiedType((*Arg)->getType(),
3417  ArgTypes[Idx]) ||
3418  (isa<ObjCMethodDecl>(AC.getDecl()) &&
3419  isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3420  "Argument and parameter types don't match");
3421  EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3422  // In particular, we depend on it being the last arg in Args, and the
3423  // objectsize bits depend on there only being one arg if !LeftToRight.
3424  assert(InitialArgSize + 1 == Args.size() &&
3425  "The code below depends on only adding one arg per EmitCallArg");
3426  (void)InitialArgSize;
3427  RValue RVArg = Args.back().RV;
3428  EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3429  ParamsToSkip + Idx);
3430  // @llvm.objectsize should never have side-effects and shouldn't need
3431  // destruction/cleanups, so we can safely "emit" it after its arg,
3432  // regardless of right-to-leftness
3433  MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3434  }
3435 
3436  if (!LeftToRight) {
3437  // Un-reverse the arguments we just evaluated so they match up with the LLVM
3438  // IR function.
3439  std::reverse(Args.begin() + CallArgsStart, Args.end());
3440  }
3441 }
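// Illustrative sketch, not part of CGCall.cpp: under the Microsoft C++ ABI
// the loop above walks the arguments right-to-left by default. Hypothetical:
//
//   A makeA(); B makeB();
//   void take(A, B);
//   take(makeA(), makeB());  // makeB() is evaluated before makeA(), so the
//                            // callee can destroy a, then b; constructs that
//                            // require left-to-right evaluation override this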
3442 
3443 namespace {
3444 
3445 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3446  DestroyUnpassedArg(Address Addr, QualType Ty)
3447  : Addr(Addr), Ty(Ty) {}
3448 
3449  Address Addr;
3450  QualType Ty;
3451 
3452  void Emit(CodeGenFunction &CGF, Flags flags) override {
3453  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3454  assert(!Dtor->isTrivial());
3455  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3456  /*Delegating=*/false, Addr);
3457  }
3458 };
3459 
3460 struct DisableDebugLocationUpdates {
3461  CodeGenFunction &CGF;
3462  bool disabledDebugInfo;
3463  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3464  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3465  CGF.disableDebugInfo();
3466  }
3467  ~DisableDebugLocationUpdates() {
3468  if (disabledDebugInfo)
3469  CGF.enableDebugInfo();
3470  }
3471 };
3472 
3473 } // end anonymous namespace
3474 
3475 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3476  QualType type) {
3477  DisableDebugLocationUpdates Dis(*this, E);
3478  if (const ObjCIndirectCopyRestoreExpr *CRE
3479  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3480  assert(getLangOpts().ObjCAutoRefCount);
3481  return emitWritebackArg(*this, args, CRE);
3482  }
3483 
3484  assert(type->isReferenceType() == E->isGLValue() &&
3485  "reference binding to unmaterialized r-value!");
3486 
3487  if (E->isGLValue()) {
3488  assert(E->getObjectKind() == OK_Ordinary);
3489  return args.add(EmitReferenceBindingToExpr(E), type);
3490  }
3491 
3492  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3493 
3494  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3495  // However, we still have to push an EH-only cleanup in case we unwind before
3496  // we make it to the call.
3497  if (HasAggregateEvalKind &&
3498  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3499  // If we're using inalloca, use the argument memory. Otherwise, use a
3500  // temporary.
3501  AggValueSlot Slot;
3502  if (args.isUsingInAlloca())
3503  Slot = createPlaceholderSlot(*this, type);
3504  else
3505  Slot = CreateAggTemp(type, "agg.tmp");
3506 
3507  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3508  bool DestroyedInCallee =
3509  RD && RD->hasNonTrivialDestructor() &&
3510  CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
3511  if (DestroyedInCallee)
3512  Slot.setExternallyDestructed();
3513 
3514  EmitAggExpr(E, Slot);
3515  RValue RV = Slot.asRValue();
3516  args.add(RV, type);
3517 
3518  if (DestroyedInCallee) {
3519  // Create a no-op GEP between the placeholder and the cleanup so we can
3520  // RAUW it successfully. It also serves as a marker of the first
3521  // instruction where the cleanup is active.
3522  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3523  type);
3524  // This unreachable is a temporary marker which will be removed later.
3525  llvm::Instruction *IsActive = Builder.CreateUnreachable();
3526  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3527  }
3528  return;
3529  }
3530 
3531  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3532  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3533  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3534  assert(L.isSimple());
3535  if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
3536  args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
3537  } else {
3538  // We can't represent a misaligned lvalue in the CallArgList, so copy
3539  // to an aligned temporary now.
3540  Address tmp = CreateMemTemp(type);
3541  EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
3542  args.add(RValue::getAggregate(tmp), type);
3543  }
3544  return;
3545  }
3546 
3547  args.add(EmitAnyExprToTemp(E), type);
3548 }
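// Illustrative sketch, not part of CGCall.cpp: the EH-only cleanup pushed
// above covers callee-destroyed arguments in the Microsoft C++ ABI.
// Hypothetical code:
//
//   struct S { ~S(); int x; };
//   void callee(S s);
//   void caller() { callee(S()); }  // the callee destroys s on the normal
//                                   // path; DestroyUnpassedArg only runs if
//                                   // we unwind before reaching the call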
3549 
3550 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3551  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3552  // implicitly widens null pointer constants that are arguments to varargs
3553  // functions to pointer-sized ints.
3554  if (!getTarget().getTriple().isOSWindows())
3555  return Arg->getType();
3556 
3557  if (Arg->getType()->isIntegerType() &&
3558  getContext().getTypeSize(Arg->getType()) <
3559  getContext().getTargetInfo().getPointerWidth(0) &&
3560  Arg->isNullPointerConstant(getContext(),
3561  Expr::NPC_ValueDependentIsNull)) {
3562  return getContext().getIntPtrType();
3563  }
3564 
3565  return Arg->getType();
3566 }
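// Illustrative sketch, not part of CGCall.cpp: on Win64, where NULL is a
// 32-bit int 0, the widening above keeps varargs callees from reading a
// partially-initialized 64-bit slot:
//
//   printf("%p\n", NULL);  // the null pointer constant is promoted to
//                          // intptr_t, zeroing the full slot as MSVC does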
3567 
3568 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3569 // optimizer it can aggressively ignore unwind edges.
3570 void
3571 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3572  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3573  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3574  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3575  CGM.getNoObjCARCExceptionsMetadata());
3576 }
3577 
3578 /// Emits a call to the given no-arguments nounwind runtime function.
3579 llvm::CallInst *
3580 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3581  const llvm::Twine &name) {
3582  return EmitNounwindRuntimeCall(callee, None, name);
3583 }
3584 
3585 /// Emits a call to the given nounwind runtime function.
3586 llvm::CallInst *
3587 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3588  ArrayRef<llvm::Value *> args,
3589  const llvm::Twine &name) {
3590  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3591  call->setDoesNotThrow();
3592  return call;
3593 }
3594 
3595 /// Emits a simple call (never an invoke) to the given no-arguments
3596 /// runtime function.
3597 llvm::CallInst *
3598 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3599  const llvm::Twine &name) {
3600  return EmitRuntimeCall(callee, None, name);
3601 }
3602 
3603 // Calls which may throw must have operand bundles indicating which funclet
3604 // they are nested within.
3605 static void
3606 getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
3607  SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
3608  // There is no need for a funclet operand bundle if we aren't inside a
3609  // funclet.
3610  if (!CurrentFuncletPad)
3611  return;
3612 
3613  // Skip intrinsics which cannot throw.
3614  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3615  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3616  return;
3617 
3618  BundleList.emplace_back("funclet", CurrentFuncletPad);
3619 }
3620 
3621 /// Emits a simple call (never an invoke) to the given runtime function.
3622 llvm::CallInst *
3623 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3624  ArrayRef<llvm::Value *> args,
3625  const llvm::Twine &name) {
3626  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3627  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3628 
3629  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name);
3630  call->setCallingConv(getRuntimeCC());
3631  return call;
3632 }
3633 
3634 /// Emits a call or invoke to the given noreturn runtime function.
3635 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3636  ArrayRef<llvm::Value*> args) {
3637  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3638  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3639 
3640  if (getInvokeDest()) {
3641  llvm::InvokeInst *invoke =
3642  Builder.CreateInvoke(callee,
3643  getUnreachableBlock(),
3644  getInvokeDest(),
3645  args,
3646  BundleList);
3647  invoke->setDoesNotReturn();
3648  invoke->setCallingConv(getRuntimeCC());
3649  } else {
3650  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3651  call->setDoesNotReturn();
3652  call->setCallingConv(getRuntimeCC());
3653  Builder.CreateUnreachable();
3654  }
3655 }
3656 
3657 /// Emits a call or invoke instruction to the given nullary runtime function.
3658 llvm::CallSite
3659 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3660  const Twine &name) {
3661  return EmitRuntimeCallOrInvoke(callee, None, name);
3662 }
3663 
3664 /// Emits a call or invoke instruction to the given runtime function.
3665 llvm::CallSite
3666 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3667  ArrayRef<llvm::Value*> args,
3668  const Twine &name) {
3669  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3670  callSite.setCallingConv(getRuntimeCC());
3671  return callSite;
3672 }
3673 
3674 /// Emits a call or invoke instruction to the given function, depending
3675 /// on the current state of the EH stack.
3676 llvm::CallSite
3677 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3678  ArrayRef<llvm::Value *> Args,
3679  const Twine &Name) {
3680  llvm::BasicBlock *InvokeDest = getInvokeDest();
3681  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3682  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3683 
3684  llvm::Instruction *Inst;
3685  if (!InvokeDest)
3686  Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3687  else {
3688  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3689  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3690  Name);
3691  EmitBlock(ContBB);
3692  }
3693 
3694  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3695  // optimizer it can aggressively ignore unwind edges.
3696  if (CGM.getLangOpts().ObjCAutoRefCount)
3697  AddObjCARCExceptionMetadata(Inst);
3698 
3699  return llvm::CallSite(Inst);
3700 }
3701 
3702 /// \brief Store a non-aggregate value to an address to initialize it. For
3703 /// initialization, a non-atomic store will be used.
3704 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
3705  LValue Dst) {
3706  if (Src.isScalar())
3707  CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
3708  else
3709  CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
3710 }
3711 
3712 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3713  llvm::Value *New) {
3714  DeferredReplacements.push_back(std::make_pair(Old, New));
3715 }
3716 
3717 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3718  const CGCallee &Callee,
3719  ReturnValueSlot ReturnValue,
3720  const CallArgList &CallArgs,
3721  llvm::Instruction **callOrInvoke) {
3722  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3723 
3724  assert(Callee.isOrdinary());
3725 
3726  // Handle struct-return functions by passing a pointer to the
3727  // location that we would like to return into.
3728  QualType RetTy = CallInfo.getReturnType();
3729  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3730 
3731  llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3732 
3733  // 1. Set up the arguments.
3734 
3735  // If we're using inalloca, insert the allocation after the stack save.
3736  // FIXME: Do this earlier rather than hacking it in here!
3737  Address ArgMemory = Address::invalid();
3738  const llvm::StructLayout *ArgMemoryLayout = nullptr;
3739  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3740  const llvm::DataLayout &DL = CGM.getDataLayout();
3741  ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3742  llvm::Instruction *IP = CallArgs.getStackBase();
3743  llvm::AllocaInst *AI;
3744  if (IP) {
3745  IP = IP->getNextNode();
3746  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3747  "argmem", IP);
3748  } else {
3749  AI = CreateTempAlloca(ArgStruct, "argmem");
3750  }
3751  auto Align = CallInfo.getArgStructAlignment();
3752  AI->setAlignment(Align.getQuantity());
3753  AI->setUsedWithInAlloca(true);
3754  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3755  ArgMemory = Address(AI, Align);
3756  }
3757 
3758  // Helper function to drill into the inalloca allocation.
3759  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3760  auto FieldOffset =
3761  CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3762  return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3763  };
3764 
3765  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3766  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3767 
3768  // If the call returns a temporary with struct return, create a temporary
3769  // alloca to hold the result, unless one is given to us.
3770  Address SRetPtr = Address::invalid();
3771  size_t UnusedReturnSize = 0;
3772  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3773  if (!ReturnValue.isNull()) {
3774  SRetPtr = ReturnValue.getValue();
3775  } else {
3776  SRetPtr = CreateMemTemp(RetTy);
3777  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3778  uint64_t size =
3779  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3780  if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3781  UnusedReturnSize = size;
3782  }
3783  }
3784  if (IRFunctionArgs.hasSRetArg()) {
3785  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3786  } else if (RetAI.isInAlloca()) {
3787  Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3788  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3789  }
3790  }
3791 
3792  Address swiftErrorTemp = Address::invalid();
3793  Address swiftErrorArg = Address::invalid();
3794 
3795  // Translate all of the arguments as necessary to match the IR lowering.
3796  assert(CallInfo.arg_size() == CallArgs.size() &&
3797  "Mismatch between function signature & arguments.");
3798  unsigned ArgNo = 0;
3799  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3800  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3801  I != E; ++I, ++info_it, ++ArgNo) {
3802  const ABIArgInfo &ArgInfo = info_it->info;
3803  RValue RV = I->RV;
3804 
3805  // Insert a padding argument to ensure proper alignment.
3806  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3807  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3808  llvm::UndefValue::get(ArgInfo.getPaddingType());
3809 
3810  unsigned FirstIRArg, NumIRArgs;
3811  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3812 
3813  switch (ArgInfo.getKind()) {
3814  case ABIArgInfo::InAlloca: {
3815  assert(NumIRArgs == 0);
3816  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3817  if (RV.isAggregate()) {
3818  // Replace the placeholder with the appropriate argument slot GEP.
3819  llvm::Instruction *Placeholder =
3820  cast<llvm::Instruction>(RV.getAggregatePointer());
3821  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3822  Builder.SetInsertPoint(Placeholder);
3823  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3824  Builder.restoreIP(IP);
3825  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3826  } else {
3827  // Store the RValue into the argument struct.
3828  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3829  unsigned AS = Addr.getType()->getPointerAddressSpace();
3830  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3831  // There are some cases where a trivial bitcast is not avoidable. The
3832  // definition of a type later in a translation unit may change its type
3833  // from {}* to (%struct.foo*)*.
3834  if (Addr.getType() != MemType)
3835  Addr = Builder.CreateBitCast(Addr, MemType);
3836  LValue argLV = MakeAddrLValue(Addr, I->Ty);
3837  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3838  }
3839  break;
3840  }
3841 
3842  case ABIArgInfo::Indirect: {
3843  assert(NumIRArgs == 1);
3844  if (RV.isScalar() || RV.isComplex()) {
3845  // Make a temporary alloca to pass the argument.
3846  Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
3847  "indirect-arg-temp", false);
3848  IRCallArgs[FirstIRArg] = Addr.getPointer();
3849 
3850  LValue argLV = MakeAddrLValue(Addr, I->Ty);
3851  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3852  } else {
3853  // We want to avoid creating an unnecessary temporary+copy here;
3854  // however, we need one in three cases:
3855  // 1. If the argument is not byval, and we are required to copy the
3856  // source. (This case doesn't occur on any common architecture.)
3857  // 2. If the argument is byval, RV is not sufficiently aligned, and
3858  // we cannot force it to be sufficiently aligned.
3859  // 3. If the argument is byval, but RV is located in an address space
3860  // different than that of the argument (0).
3861  Address Addr = RV.getAggregateAddress();
3862  CharUnits Align = ArgInfo.getIndirectAlign();
3863  const llvm::DataLayout *TD = &CGM.getDataLayout();
3864  const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
3865  const unsigned ArgAddrSpace =
3866  (FirstIRArg < IRFuncTy->getNumParams()
3867  ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
3868  : 0);
3869  if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
3870  (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
3871  llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
3872  Align.getQuantity(), *TD)
3873  < Align.getQuantity()) ||
3874  (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
3875  // Create an aligned temporary, and copy to it.
3876  Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
3877  "byval-temp", false);
3878  IRCallArgs[FirstIRArg] = AI.getPointer();
3879  EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
3880  } else {
3881  // Skip the extra memcpy call.
3882  IRCallArgs[FirstIRArg] = Addr.getPointer();
3883  }
3884  }
3885  break;
3886  }
3887 
3888  case ABIArgInfo::Ignore:
3889  assert(NumIRArgs == 0);
3890  break;
3891 
3892  case ABIArgInfo::Extend:
3893  case ABIArgInfo::Direct: {
3894  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3895  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3896  ArgInfo.getDirectOffset() == 0) {
3897  assert(NumIRArgs == 1);
3898  llvm::Value *V;
3899  if (RV.isScalar())
3900  V = RV.getScalarVal();
3901  else
3902  V = Builder.CreateLoad(RV.getAggregateAddress());
3903 
3904  // Implement swifterror by copying into a new swifterror argument.
3905  // We'll write back in the normal path out of the call.
3906  if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3907  == ParameterABI::SwiftErrorResult) {
3908  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
3909 
3910  QualType pointeeTy = I->Ty->getPointeeType();
3911  swiftErrorArg =
3912  Address(V, getContext().getTypeAlignInChars(pointeeTy));
3913 
3914  swiftErrorTemp =
3915  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3916  V = swiftErrorTemp.getPointer();
3917  cast<llvm::AllocaInst>(V)->setSwiftError(true);
3918 
3919  llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
3920  Builder.CreateStore(errorValue, swiftErrorTemp);
3921  }
3922 
3923  // We might have to widen integers, but we should never truncate.
3924  if (ArgInfo.getCoerceToType() != V->getType() &&
3925  V->getType()->isIntegerTy())
3926  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3927 
3928  // If the argument doesn't match, perform a bitcast to coerce it. This
3929  // can happen due to trivial type mismatches.
3930  if (FirstIRArg < IRFuncTy->getNumParams() &&
3931  V->getType() != IRFuncTy->getParamType(FirstIRArg))
3932  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
3933 
3934  IRCallArgs[FirstIRArg] = V;
3935  break;
3936  }
3937 
3938  // FIXME: Avoid the conversion through memory if possible.
3939  Address Src = Address::invalid();
3940  if (RV.isScalar() || RV.isComplex()) {
3941  Src = CreateMemTemp(I->Ty, "coerce");
3942  LValue SrcLV = MakeAddrLValue(Src, I->Ty);
3943  EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
3944  } else {
3945  Src = RV.getAggregateAddress();
3946  }
3947 
3948  // If the value is offset in memory, apply the offset now.
3949  Src = emitAddressAtOffset(*this, Src, ArgInfo);
3950 
3951  // Fast-isel and the optimizer generally like scalar values better than
3952  // FCAs, so we flatten them if this is safe to do for this argument.
3953  llvm::StructType *STy =
3954  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
3955  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
3956  llvm::Type *SrcTy = Src.getType()->getElementType();
3957  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
3958  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
3959 
3960  // If the source type is smaller than the destination type of the
3961  // coerce-to logic, copy the source value into a temp alloca the size
3962  // of the destination type to allow loading all of it. The bits past
3963  // the source value are left undef.
3964  if (SrcSize < DstSize) {
3965  Address TempAlloca
3966  = CreateTempAlloca(STy, Src.getAlignment(),
3967  Src.getName() + ".coerce");
3968  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
3969  Src = TempAlloca;
3970  } else {
3971  Src = Builder.CreateBitCast(Src,
3972  STy->getPointerTo(Src.getAddressSpace()));
3973  }
3974 
3975  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
3976  assert(NumIRArgs == STy->getNumElements());
3977  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3978  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
3979  Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
3980  llvm::Value *LI = Builder.CreateLoad(EltPtr);
3981  IRCallArgs[FirstIRArg + i] = LI;
3982  }
3983  } else {
3984  // In the simple case, just pass the coerced loaded value.
3985  assert(NumIRArgs == 1);
3986  IRCallArgs[FirstIRArg] =
3987  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
3988  }
3989 
3990  break;
3991  }
3992 
3993  case ABIArgInfo::CoerceAndExpand: {
3994  auto coercionType = ArgInfo.getCoerceAndExpandType();
3995  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
3996 
3997  llvm::Value *tempSize = nullptr;
3998  Address addr = Address::invalid();
3999  if (RV.isAggregate()) {
4000  addr = RV.getAggregateAddress();
4001  } else {
4002  assert(RV.isScalar()); // complex should always just be direct
4003 
4004  llvm::Type *scalarType = RV.getScalarVal()->getType();
4005  auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4006  auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4007 
4008  tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);
4009 
4010  // Materialize to a temporary.
4011  addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4012  CharUnits::fromQuantity(std::max(layout->getAlignment(),
4013  scalarAlign)));
4014  EmitLifetimeStart(scalarSize, addr.getPointer());
4015 
4016  Builder.CreateStore(RV.getScalarVal(), addr);
4017  }
4018 
4019  addr = Builder.CreateElementBitCast(addr, coercionType);
4020 
4021  unsigned IRArgPos = FirstIRArg;
4022  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4023  llvm::Type *eltType = coercionType->getElementType(i);
4024  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4025  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4026  llvm::Value *elt = Builder.CreateLoad(eltAddr);
4027  IRCallArgs[IRArgPos++] = elt;
4028  }
4029  assert(IRArgPos == FirstIRArg + NumIRArgs);
4030 
4031  if (tempSize) {
4032  EmitLifetimeEnd(tempSize, addr.getPointer());
4033  }
4034 
4035  break;
4036  }
4037 
4038  case ABIArgInfo::Expand:
4039  unsigned IRArgPos = FirstIRArg;
4040  ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
4041  assert(IRArgPos == FirstIRArg + NumIRArgs);
4042  break;
4043  }
4044  }
4045 
4046  llvm::Value *CalleePtr = Callee.getFunctionPointer();
4047 
4048  // If we're using inalloca, set up that argument.
4049  if (ArgMemory.isValid()) {
4050  llvm::Value *Arg = ArgMemory.getPointer();
4051  if (CallInfo.isVariadic()) {
4052  // When passing non-POD arguments by value to variadic functions, we will
4053  // end up with a variadic prototype and an inalloca call site. In such
4054  // cases, we can't do any parameter mismatch checks. Give up and bitcast
4055  // the callee.
4056  unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4057  auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4058  CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4059  } else {
4060  llvm::Type *LastParamTy =
4061  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4062  if (Arg->getType() != LastParamTy) {
4063 #ifndef NDEBUG
4064  // Assert that these structs have equivalent element types.
4065  llvm::StructType *FullTy = CallInfo.getArgStruct();
4066  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4067  cast<llvm::PointerType>(LastParamTy)->getElementType());
4068  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4069  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4070  DE = DeclaredTy->element_end(),
4071  FI = FullTy->element_begin();
4072  DI != DE; ++DI, ++FI)
4073  assert(*DI == *FI);
4074 #endif
4075  Arg = Builder.CreateBitCast(Arg, LastParamTy);
4076  }
4077  }
4078  assert(IRFunctionArgs.hasInallocaArg());
4079  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4080  }
4081 
4082  // 2. Prepare the function pointer.
4083 
4084  // If the callee is a bitcast of a non-variadic function to have a
4085  // variadic function pointer type, check to see if we can remove the
4086  // bitcast. This comes up with unprototyped functions.
4087  //
4088  // This makes the IR nicer, but more importantly it ensures that we
4089  // can inline the function at -O0 if it is marked always_inline.
4090  auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4091  llvm::FunctionType *CalleeFT =
4092  cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4093  if (!CalleeFT->isVarArg())
4094  return Ptr;
4095 
4096  llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4097  if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4098  return Ptr;
4099 
4100  llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4101  if (!OrigFn)
4102  return Ptr;
4103 
4104  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4105 
4106  // If the original type is variadic, or if any of the component types
4107  // disagree, we cannot remove the cast.
4108  if (OrigFT->isVarArg() ||
4109  OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4110  OrigFT->getReturnType() != CalleeFT->getReturnType())
4111  return Ptr;
4112 
4113  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4114  if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4115  return Ptr;
4116 
4117  return OrigFn;
4118  };
4119  CalleePtr = simplifyVariadicCallee(CalleePtr);
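// Illustrative sketch, not part of CGCall.cpp: the simplification above
// strips the cast introduced by an unprototyped declaration. Hypothetical:
//
//   void f();             // unprototyped: calls go through a bitcast of f
//   void g(void) { f(); } // undoing the cast lets an always_inline f be
//                         // inlined even at -O0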
4120 
4121  // 3. Perform the actual call.
4122 
4123  // Deactivate any cleanups that we're supposed to do immediately before
4124  // the call.
4125  if (!CallArgs.getCleanupsToDeactivate().empty())
4126  deactivateArgCleanupsBeforeCall(*this, CallArgs);
4127 
4128  // Assert that the arguments we computed match up. The IR verifier
4129  // will catch this, but this is a common enough source of problems
4130  // during IRGen changes that it's way better for debugging to catch
4131  // it ourselves here.
4132 #ifndef NDEBUG
4133  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4134  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4135  // Inalloca argument can have a different type.
4136  if (IRFunctionArgs.hasInallocaArg() &&
4137  i == IRFunctionArgs.getInallocaArgNo())
4138  continue;
4139  if (i < IRFuncTy->getNumParams())
4140  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4141  }
4142 #endif
4143 
4144  // Compute the calling convention and attributes.
4145  unsigned CallingConv;
4146  llvm::AttributeList Attrs;
4147  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4148  Callee.getAbstractInfo(), Attrs, CallingConv,
4149  /*AttrOnCallSite=*/true);
4150 
4151  // Apply some call-site-specific attributes.
4152  // TODO: work this into building the attribute set.
4153 
4154  // Apply always_inline to all calls within flatten functions.
4155  // FIXME: should this really take priority over __try, below?
4156  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4157  !(Callee.getAbstractInfo().getCalleeDecl() &&
4158  Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
4159  Attrs =
4160  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4161  llvm::Attribute::AlwaysInline);
4162  }
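// Illustrative sketch, not part of CGCall.cpp: the attribute tweak above
// applies inside functions marked flatten. Hypothetical:
//
//   __attribute__((flatten)) void driver(void) {
//     helper();  // this call site receives always_inline, unless helper is
//   }            // itself declared __attribute__((noinline))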
4163 
4164  // Disable inlining inside SEH __try blocks.
4165  if (isSEHTryScope()) {
4166  Attrs =
4167  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4168  llvm::Attribute::NoInline);
4169  }
4170 
4171  // Decide whether to use a call or an invoke.
4172  bool CannotThrow;
4173  if (currentFunctionUsesSEHTry()) {
4174  // SEH cares about asynchronous exceptions, so everything can "throw."
4175  CannotThrow = false;
4176  } else if (isCleanupPadScope() &&
4178  // The MSVC++ personality will implicitly terminate the program if an
4179  // exception is thrown during a cleanup outside of a try/catch.
4180  // We don't need to model anything in IR to get this behavior.
4181  CannotThrow = true;
4182  } else {
4183  // Otherwise, nounwind call sites will never throw.
4184  CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4185  llvm::Attribute::NoUnwind);
4186  }
4187  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4188 
4189  SmallVector<llvm::OperandBundleDef, 1> BundleList;
4190  getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList);
4191 
4192  // Emit the actual call/invoke instruction.
4193  llvm::CallSite CS;
4194  if (!InvokeDest) {
4195  CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4196  } else {
4197  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4198  CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4199  BundleList);
4200  EmitBlock(Cont);
4201  }
4202  llvm::Instruction *CI = CS.getInstruction();
4203  if (callOrInvoke)
4204  *callOrInvoke = CI;
4205 
4206  // Apply the attributes and calling convention.
4207  CS.setAttributes(Attrs);
4208  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4209 
4210  // Apply various metadata.
4211 
4212  if (!CI->getType()->isVoidTy())
4213  CI->setName("call");
4214 
4215  // Insert instrumentation or attach profile metadata at indirect call sites.
4216  // For more details, see the comment before the definition of
4217  // IPVK_IndirectCallTarget in InstrProfData.inc.
4218  if (!CS.getCalledFunction())
4219  PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4220  CI, CalleePtr);
4221 
4222  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4223  // optimizer it can aggressively ignore unwind edges.
4224  if (CGM.getLangOpts().ObjCAutoRefCount)
4225  AddObjCARCExceptionMetadata(CI);
4226 
4227  // Suppress tail calls if requested.
4228  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4229  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4230  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4231  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4232  }
4233 
4234  // 4. Finish the call.
4235 
4236  // If the call doesn't return, finish the basic block and clear the
4237  // insertion point; this allows the rest of IRGen to discard
4238  // unreachable code.
4239  if (CS.doesNotReturn()) {
4240  if (UnusedReturnSize)
4241  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4242  SRetPtr.getPointer());
4243 
4244  Builder.CreateUnreachable();
4245  Builder.ClearInsertionPoint();
4246 
4247  // FIXME: For now, emit a dummy basic block because expr emitters
4248  // generally are not ready to handle emitting expressions at unreachable
4249  // points.
4250  EnsureInsertPoint();
4251 
4252  // Return a reasonable RValue.
4253  return GetUndefRValue(RetTy);
4254  }
4255 
4256  // Perform the swifterror writeback.
4257  if (swiftErrorTemp.isValid()) {
4258  llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4259  Builder.CreateStore(errorResult, swiftErrorArg);
4260  }
4261 
4262  // Emit any call-associated writebacks immediately. Arguably this
4263  // should happen after any return-value munging.
4264  if (CallArgs.hasWritebacks())
4265  emitWritebacks(*this, CallArgs);
4266 
4267  // The stack cleanup for inalloca arguments has to run out of the normal
4268  // lexical order, so deactivate it and run it manually here.
4269  CallArgs.freeArgumentMemory(*this);
4270 
4271  // Extract the return value.
4272  RValue Ret = [&] {
4273  switch (RetAI.getKind()) {
4274  case ABIArgInfo::CoerceAndExpand: {
4275  auto coercionType = RetAI.getCoerceAndExpandType();
4276  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4277 
4278  Address addr = SRetPtr;
4279  addr = Builder.CreateElementBitCast(addr, coercionType);
4280 
4281  assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4282  bool requiresExtract = isa<llvm::StructType>(CI->getType());
4283 
4284  unsigned unpaddedIndex = 0;
4285  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4286  llvm::Type *eltType = coercionType->getElementType(i);
4287  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4288  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4289  llvm::Value *elt = CI;
4290  if (requiresExtract)
4291  elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4292  else
4293  assert(unpaddedIndex == 0);
4294  Builder.CreateStore(elt, eltAddr);
4295  }
4296  // FALLTHROUGH
4297  LLVM_FALLTHROUGH;
4298  }
4299 
4300  case ABIArgInfo::InAlloca:
4301  case ABIArgInfo::Indirect: {
4302  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4303  if (UnusedReturnSize)
4304  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4305  SRetPtr.getPointer());
4306  return ret;
4307  }
4308 
4309  case ABIArgInfo::Ignore:
4310  // If we are ignoring an argument that had a result, make sure to
4311  // construct the appropriate return value for our caller.
4312  return GetUndefRValue(RetTy);
4313 
4314  case ABIArgInfo::Extend:
4315  case ABIArgInfo::Direct: {
4316  llvm::Type *RetIRTy = ConvertType(RetTy);
4317  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4318  switch (getEvaluationKind(RetTy)) {
4319  case TEK_Complex: {
4320  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4321  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4322  return RValue::getComplex(std::make_pair(Real, Imag));
4323  }
4324  case TEK_Aggregate: {
4325  Address DestPtr = ReturnValue.getValue();
4326  bool DestIsVolatile = ReturnValue.isVolatile();
4327 
4328  if (!DestPtr.isValid()) {
4329  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4330  DestIsVolatile = false;
4331  }
4332  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4333  return RValue::getAggregate(DestPtr);
4334  }
4335  case TEK_Scalar: {
4336  // If the argument doesn't match, perform a bitcast to coerce it. This
4337  // can happen due to trivial type mismatches.
4338  llvm::Value *V = CI;
4339  if (V->getType() != RetIRTy)
4340  V = Builder.CreateBitCast(V, RetIRTy);
4341  return RValue::get(V);
4342  }
4343  }
4344  llvm_unreachable("bad evaluation kind");
4345  }
4346 
4347  Address DestPtr = ReturnValue.getValue();
4348  bool DestIsVolatile = ReturnValue.isVolatile();
4349 
4350  if (!DestPtr.isValid()) {
4351  DestPtr = CreateMemTemp(RetTy, "coerce");
4352  DestIsVolatile = false;
4353  }
4354 
4355  // If the value is offset in memory, apply the offset now.
4356  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4357  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4358 
4359  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4360  }
4361 
4362  case ABIArgInfo::Expand:
4363  llvm_unreachable("Invalid ABI kind for return argument");
4364  }
4365 
4366  llvm_unreachable("Unhandled ABIArgInfo::Kind");
4367  } ();
4368 
4369  // Emit the assume_aligned check on the return value.
4370  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4371  if (Ret.isScalar() && TargetDecl) {
4372  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4373  llvm::Value *OffsetValue = nullptr;
4374  if (const auto *Offset = AA->getOffset())
4375  OffsetValue = EmitScalarExpr(Offset);
4376 
4377  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4378  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4379  EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4380  OffsetValue);
4381  } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4382  llvm::Value *ParamVal =
4383  CallArgs[AA->getParamIndex() - 1].RV.getScalarVal();
4384  EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4385  }
4386  }
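// Illustrative sketch, not part of CGCall.cpp: the assumptions above fire
// for allocator-style declarations like these hypothetical ones:
//
//   void *my_alloc(size_t n) __attribute__((assume_aligned(64)));
//   void *my_memalign(size_t a, size_t n) __attribute__((alloc_align(1)));
//
// letting the optimizer assume each call's result is 64-byte aligned, or
// aligned to the value passed for the first parameter, respectively.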
4387 
4388  return Ret;
4389 }
4390 
4391 /* VarArg handling */
4392 
4393 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4394  VAListAddr = VE->isMicrosoftABI()
4395  ? EmitMSVAListRef(VE->getSubExpr())
4396  : EmitVAListRef(VE->getSubExpr());
4397  QualType Ty = VE->getType();
4398  if (VE->isMicrosoftABI())
4399  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4400  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4401 }
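// Illustrative sketch, not part of CGCall.cpp: EmitVAArg is the codegen
// behind C's va_arg, dispatching to the target ABI (or to the Microsoft
// variant for __builtin_ms_va_list). Hypothetical usage:
//
//   #include <stdarg.h>
//   int sum(int n, ...) {
//     va_list ap;
//     va_start(ap, n);
//     int s = 0;
//     for (int i = 0; i < n; ++i)
//       s += va_arg(ap, int);  // each expansion reaches EmitVAArg
//     va_end(ap);
//     return s;
//   }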
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:640
bool isAggregate() const
Definition: CGValue.h:54
const llvm::DataLayout & getDataLayout() const
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
Definition: ExprObjC.h:1517
CGCXXABI & getCXXABI() const
Definition: CodeGenTypes.h:177
Ignore - Ignore the argument (treat as void).
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Definition: CGCall.h:281
ParameterABI getABI() const
Return the ABI treatment of this parameter.
Definition: Type.h:3304
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
TargetOptions & getTargetOpts() const
Retrieve the target options.
Definition: TargetInfo.h:122
FunctionDecl - An instance of this class is created to represent a function declaration or definition...
Definition: Decl.h:1698
Address getAddress() const
Definition: CGValue.h:555
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
Definition: CGCall.cpp:614
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
Definition: CGCall.cpp:2938
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2285
Complete object ctor.
Definition: ABI.h:26
CanQualType VoidPtrTy
Definition: ASTContext.h:1012
A (possibly-)qualified type.
Definition: Type.h:653
bool isBlockPointerType() const
Definition: Type.h:5952
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses &#39;sret&#39; when used as a return type.
Definition: CGCall.cpp:1481
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition: Type.cpp:1856
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
Definition: CGCall.cpp:79
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
Definition: CGCall.cpp:3066
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign)
Create a temporary allocation for the purposes of coercion.
Definition: CGCall.cpp:1093
CXXDtorType getDtorType() const
Definition: GlobalDecl.h:71
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
Definition: CGCall.cpp:2678
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
Definition: CGCall.cpp:549
const ABIInfo & getABIInfo() const
Definition: CodeGenTypes.h:175
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3058
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty, const FunctionDecl *FD)
Arrange the argument and result information for a value of the given freestanding function type...
Definition: CGCall.cpp:187
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:456
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:790
const Decl * getCalleeDecl() const
Definition: CGCall.h:62
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type...
Definition: Type.h:3670
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:788
Extend - Valid only for integer argument types.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:967
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition: CGCall.cpp:4393
static bool isProvablyNull(llvm::Value *addr)
Definition: CGCall.cpp:3061
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
Definition: CGCall.cpp:242
bool isVirtual() const
Definition: DeclCXX.h:2009
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
const Expr * getSubExpr() const
Definition: Expr.h:3796
bool isVolatile() const
Definition: CGValue.h:298
The base class of the type hierarchy.
Definition: Type.h:1353
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:1836
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
Definition: CGCall.cpp:2152
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
Definition: Type.h:5784
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
static int getExpansionSize(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:946
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:671
const ParmVarDecl * getParamDecl(unsigned I) const
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i...
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, ArrayRef< llvm::Value *> Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3677
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
Definition: CGObjC.cpp:1981
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:360
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2397
virtual AddedStructorArgs buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters...
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
Definition: CGCall.cpp:2132
bool hasWritebacks() const
Definition: CGCall.h:232
Default closure variant of a ctor.
Definition: ABI.h:30
ExtParameterInfo withIsNoEscape(bool NoEscape) const
Definition: Type.h:3341
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
VarDecl - An instance of this class is created to represent a variable declaration or definition...
Definition: Decl.h:807
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:104
llvm::Instruction * getStackBase() const
Definition: CGCall.h:254
unsigned getNumParams() const
Definition: Type.h:3491
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
llvm::Value * getFunctionPointer() const
Definition: CGCall.h:157
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
Definition: CGCall.cpp:1197
const T * getAs() const
Member-template getAs<specific type>&#39;.
Definition: Type.h:6307
void setCoerceToType(llvm::Type *T)
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:3159
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:139
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, LValue Dst)
Store a non-aggregate value to an address to initialize it.
Definition: CGCall.cpp:3704
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition: CGCall.cpp:3305
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:234
llvm::Value * getPointer() const
Definition: Address.h:38
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:288
Address getValue() const
Definition: CGCall.h:301
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
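The two conversions differ for types whose scalar and in-memory forms diverge; a sketch, assuming a live CodeGenFunction &CGF and its ASTContext &Ctx:

  // The scalar form of 'bool' is i1, but its in-memory form is i8.
  llvm::Type *ScalarTy = CGF.ConvertType(Ctx.BoolTy);        // i1
  llvm::Type *MemTy    = CGF.ConvertTypeForMem(Ctx.BoolTy);  // i8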
ParmVarDecl - Represents a parameter to a function.
Definition: Decl.h:1514
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:57
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
Definition: CGCall.cpp:46
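An illustrative call (the function lives in clang's CodeGen namespace), mapping a frontend convention onto the LLVM enum before stamping it on a function or call site:

  unsigned CC =
      clang::CodeGen::ClangCallConvToLLVMCallConv(clang::CC_X86StdCall);
  // CC == llvm::CallingConv::X86_StdCall; conventions LLVM has no direct
  // counterpart for (e.g. CC_X86Pascal) fall back to llvm::CallingConv::C.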
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Definition: TargetInfo.cpp:407
RecordDecl - Represents a struct/union/class.
Definition: Decl.h:3482
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3297
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:311
const TargetInfo & getTarget() const
An object to manage conditionally-evaluated expressions.
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2371
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:859
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:3132
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2758
Address getAddress() const
Definition: CGValue.h:324
unsigned getRegParm() const
Definition: Type.h:3133
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:149
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:3671
llvm::Constant * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
field_range fields() const
Definition: Decl.h:3613
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2214
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struc...
Definition: Decl.h:2461
CharUnits getAlignment() const
Definition: CGValue.h:313
RequiredArgs getRequiredArgs() const
bool isUsingInAlloca() const
Returns true if we're using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:259
unsigned getFunctionScopeIndex() const
Returns the index of this parameter in its prototype or method scope.
Definition: Decl.h:1567
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:104
llvm::CallInst * EmitRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isOrdinary() const
Definition: CGCall.h:150
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:263
CharUnits getArgStructAlignment() const
bool isReferenceType() const
Definition: Type.h:5956
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
Definition: Type.h:3291
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:81
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2204
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that&#39;s being passed call-by-writeback.
Definition: CGCall.cpp:3161
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:241
bool getProducesResult() const
Definition: Type.h:3129
llvm::FunctionType * getFunctionType() const
Definition: CGCall.h:161
bool isGLValue() const
Definition: Expr.h:252
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:282
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment...
static bool hasScalarEvaluationKind(QualType T)
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2551
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:157
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
uint32_t Offset
Definition: CacheTokens.cpp:43
llvm::StructType * getCoerceAndExpandType() const
bool hasConstructorVariants() const
Does this ABI have different entrypoints for complete-object and base-subobject constructors?
Definition: TargetCXXABI.h:222
Wrapper for source info for functions.
Definition: TypeLoc.h:1396
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:67
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:109
unsigned getInAllocaFieldIndex() const
bool isComplex() const
Definition: CGValue.h:53
const_arg_iterator arg_begin() const
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:66
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:378
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:1785
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:259
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:134
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
Definition: CGCall.cpp:455
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
Definition: CGCall.cpp:689
Values of this type can never be null.
bool isNothrow(const ASTContext &Ctx, bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.h:3612
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:85
bool isSimple() const
Definition: CGValue.h:249
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:263
bool isInstance() const
Definition: DeclCXX.h:1992
An ordinary object is located at an address in memory.
Definition: Specifiers.h:123
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1458
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:94
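A sketch, assuming a live CodeGenFunction &CGF: static allocas are placed in the function's entry block so later passes can promote them to SSA values.

  llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(CGF.Int32Ty, "tmp.i32");
  clang::CodeGen::Address TmpAddr(Tmp, clang::CharUnits::fromQuantity(4));
  CGF.Builder.CreateStore(llvm::ConstantInt::get(CGF.Int32Ty, 0), TmpAddr);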
FunctionType::ExtInfo getExtInfo() const
QualType getReturnType() const
Definition: DeclObjC.h:361
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:724
bool getNoReturn() const
Definition: Type.h:3128
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:84
Address getAggregateAddress() const
getAggregateAddress - Return the Address of the aggregate.
Definition: CGValue.h:71
bool getNoCallerSavedRegs() const
Definition: Type.h:3130
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
Definition: CGCall.cpp:3475
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e.g., it is an unsigned integer type or a vector.
Definition: Type.cpp:1870
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:497
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD)
Derives the 'this' type for codegen purposes, i.e.
Definition: CGCall.cpp:73
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:3179
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
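A round-trip sketch (assumes a live CodeGenFunction &CGF, a placeholder llvm::Value *Val, an Address Addr holding an 'int', and a SourceLocation Loc): the store/load pair applies the scalar-to-memory type conversions (e.g. i1 vs. i8 for bool) on each side.

  clang::ASTContext &Ctx = CGF.getContext();
  CGF.EmitStoreOfScalar(Val, Addr, /*Volatile=*/false, Ctx.IntTy);
  llvm::Value *Reloaded =
      CGF.EmitLoadOfScalar(Addr, /*Volatile=*/false, Ctx.IntTy, Loc);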
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
Definition: CGCall.cpp:486
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
Definition: Type.h:3235
bool hasAttr() const
Definition: DeclBase.h:535
CanQualType getReturnType() const
Const iterator for iterating over Stmt * arrays that contain only Expr *.
Definition: Stmt.h:346
bool isValid() const
Definition: Address.h:36
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1590
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3270
llvm::CallInst * EmitNounwindRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:3801
const TargetCodeGenInfo & getTargetCodeGenInfo()
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:39
writeback_const_range writebacks() const
Definition: CGCall.h:237
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:226
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
Definition: CGCall.cpp:3022
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:3778
Address Temporary
The temporary alloca.
Definition: CGCall.h:193
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
Definition: CGCXXABI.h:107
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:196
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:3002
Expr - This represents one expression.
Definition: Expr.h:106
bool isVariadic() const
Whether this function is variadic.
Definition: Decl.cpp:2570
static Address invalid()
Definition: Address.h:35
llvm::Type * getUnpaddedCoerceAndExpandType() const
const FunctionProtoType * T
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:2997
bool useObjCFPRetForRealType(RealType T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
Definition: TargetInfo.h:552
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:88
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:66
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
Definition: CGCall.cpp:682
bool getHasRegParm() const
Definition: Type.h:3131
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:6370
bool isObjCRetainableType() const
Definition: Type.cpp:3824
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2620
llvm::Constant * objc_retain
id objc_retain(id);
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:44
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
void add(RValue rvalue, QualType type, bool needscopy=false)
Definition: CGCall.h:207
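A hypothetical sketch: hand-building the argument list for a call to 'void f(int)', assuming a live CodeGenFunction &CGF.

  clang::ASTContext &Ctx = CGF.getContext();
  clang::CodeGen::CallArgList Args;
  llvm::Value *FortyTwo = llvm::ConstantInt::get(CGF.IntTy, 42);
  Args.add(clang::CodeGen::RValue::get(FortyTwo), Ctx.IntTy);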
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:344
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2539
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
A class for recording the number of arguments that a function signature requires. ...
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type...
Definition: CGCall.cpp:1485
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:627
QualType getType() const
Definition: Expr.h:128
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1327
bool hasNonTrivialDestructor() const
Determine whether this class has a non-trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1410
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2696
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:197
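A worked example (values made up for illustration): with a 12-byte element type in an array whose start is 8-byte aligned, the second element sits at offset 12, so an arbitrary element is only guaranteed the largest power of two dividing both quantities, i.e. 4 bytes.

  clang::CharUnits ElemSize   = clang::CharUnits::fromQuantity(12);
  clang::CharUnits ArrayAlign = clang::CharUnits::fromQuantity(8);
  clang::CharUnits ElemAlign  = ArrayAlign.alignmentOfArrayElement(ElemSize);
  assert(ElemAlign.getQuantity() == 4 && !ElemAlign.isZero());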
llvm::PointerType * AllocaInt8PtrTy
void Profile(llvm::FoldingSetNodeID &ID)
UnaryOperator - This represents the unary-expression&#39;s (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1717
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:2004
ASTContext & getContext() const
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:444
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, IsZeroed_t isZeroed=IsNotZeroed)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:495
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1143
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:233
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:35
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1249
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:412
CanQualType getCanonicalTypeUnqualified() const
std::string CPU
If given, the name of the target CPU to generate code for.
Definition: TargetOptions.h:36
The l-value was considered opaque, so the alignment was determined from a type.
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store of Src to Dst, coercing between the source and destination types as needed.
Definition: CGCall.cpp:1274
Enumerates target-specific builtins in their own namespaces within namespace clang.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:142
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:180
Kind
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
Definition: CGCall.cpp:1490
CanProxy< U > castAs() const
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:3150
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant...
Definition: Expr.cpp:3275
Encodes a location in the source.
QualType getReturnType() const
Definition: Type.h:3203
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2088
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:278
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition: CGCall.cpp:3666
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1230
CallingConv getCC() const
Definition: Type.h:3140
const Decl * getDecl() const
Definition: GlobalDecl.h:64
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
Definition: ASTContext.h:1810
An aggregate value slot.
Definition: CGValue.h:434
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
Definition: CGCall.cpp:442
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:1964
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2321
const_arg_iterator arg_end() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
ObjCEntrypoints & getObjCEntrypoints() const
CoerceAndExpand - Only valid for aggregate argument types.
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:3289
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings startin...
Definition: TargetOptions.h:55
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying ty...
Definition: Type.cpp:1816
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:706
CanQualType VoidTy
Definition: ASTContext.h:996
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
bool isAnyPointerType() const
Definition: Type.h:5948
An aligned address.
Definition: Address.h:25
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Definition: TargetInfo.h:558
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:178
All available information about a concrete callee.
Definition: CGCall.h:66
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:352
Complete object dtor.
Definition: ABI.h:36
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
Definition: CGCall.cpp:1507
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1668
bool hasFlexibleArrayMember() const
Definition: Decl.h:3535
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3526
CXXCtorType
C++ constructor types.
Definition: ABI.h:25
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
Definition: CGCall.cpp:607
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:529
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1107
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:276
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:59
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
const TargetInfo & getTarget() const
Definition: CodeGenTypes.h:176
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
const CGCalleeInfo & getAbstractInfo() const
Definition: CGCall.h:153
Dataflow Directional Tag Classes.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2343
uint64_t SanitizerMask
Definition: Sanitizers.h:24
ExtInfo getExtInfo() const
Definition: Type.h:3214
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:93
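A sketch of the three RValue flavors used throughout call lowering (V, Re, Im, and Addr are placeholders for values produced elsewhere in codegen):

  clang::CodeGen::RValue Scalar = clang::CodeGen::RValue::get(V);
  clang::CodeGen::RValue Cplx   = clang::CodeGen::RValue::getComplex(Re, Im);
  clang::CodeGen::RValue Agg    = clang::CodeGen::RValue::getAggregate(Addr);
  // Query side: Scalar.getScalarVal(), Cplx.getComplexVal(),
  // Agg.getAggregateAddress().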
CodeGenFunction::ComplexPairTy ComplexPairTy
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
Definition: CGBuilder.h:172
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:92
LValue Source
The original argument.
Definition: CGCall.h:190
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type...
Definition: CGCall.cpp:419
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args)
Emits a call or invoke to the given noreturn runtime function.
Definition: CGCall.cpp:3635
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:91
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:988
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP, const FunctionDecl *FD)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:167
void EmitARCIntrinsicUse(ArrayRef< llvm::Value *> values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition: CGObjC.cpp:1803
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:70
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change...
Definition: TargetCXXABI.h:216
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2085
SourceLocation getLocStart() const LLVM_READONLY
Definition: Decl.h:732
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:783
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:108
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
Definition: TargetInfo.cpp:388
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
static void getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad, SmallVectorImpl< llvm::OperandBundleDef > &BundleList)
Definition: CGCall.cpp:3606
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isVolatileQualified() const
Definition: CGValue.h:56
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:3978
Complex values, per C99 6.2.5p11.
Definition: Type.h:2225
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:77
static bool classof(const OMPClause *T)
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:1976
QualType getCanonicalTypeInternal() const
Definition: Type.h:2109
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Definition: CGCXXABI.h:129
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:6193
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
Definition: CGCall.cpp:2639
CharUnits getIndirectAlign() const
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:44
T * getAttr() const
Definition: DeclBase.h:531
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:52
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:640
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:120
llvm::Value * getAggregatePointer() const
Definition: CGValue.h:76
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:62
Expand - Only valid for aggregate argument types.
const CGFunctionInfo & arrangeMSMemberPointerThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
Definition: CGCall.cpp:516
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:2421
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type *>::iterator &TI)
getExpandedTypes - Expand the given type into the sequence of LLVM IR types it is passed as.
Definition: CGCall.cpp:966
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:889
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:436
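A minimal control-flow sketch, assuming a live CodeGenFunction &CGF:

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("cont");
  CGF.Builder.CreateBr(ContBB);  // terminate the current block
  CGF.EmitBlock(ContBB);         // append ContBB and make it the insert point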
Represents a base class of a C++ class.
Definition: DeclCXX.h:191
char __ovld __cnfn max(char x, char y)
Returns y if x < y, otherwise it returns x.
RValue asAggregateRValue() const
Definition: CGValue.h:428
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2007
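Bits vs. characters (assumes an ASTContext &Ctx): getTypeSize reports bits, while getTypeSizeInChars reports CharUnits; for 'int' on common targets these are 32 and 4 respectively.

  uint64_t Bits = Ctx.getTypeSize(Ctx.IntTy);
  clang::CharUnits Chars = Ctx.getTypeSizeInChars(Ctx.IntTy);
  assert(Bits == uint64_t(Chars.getQuantity()) * Ctx.getCharWidth());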
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:1986
ASTContext & getContext() const
Definition: CodeGenTypes.h:174
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:134
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:517
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl...
Definition: CGCall.cpp:1652
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g., it is a signed integer type or a vector.
Definition: Type.cpp:1830
Represents a C++ struct/union/class.
Definition: DeclCXX.h:299
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(const CXXMethodDecl *MD)
Get the type of the implicit "this" parameter used by a method.
Definition: CGCXXABI.h:330
bool isVoidType() const
Definition: Type.h:6171
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2190
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:5747
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1170
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like &#39;int&#39;.
Definition: Type.h:2145
bool isVariadic() const
Definition: DeclObjC.h:454
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Definition: ExprObjC.h:1547
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1682
Copying closure variant of a ctor.
Definition: ABI.h:29
Defines the clang::TargetInfo interface.
StringRef getName() const
getName - Get the name of the identifier for this declaration as a StringRef.
Definition: Decl.h:270
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows)
Definition: CGCall.cpp:194
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
Definition: CGCall.cpp:526
CanQualType IntTy
Definition: ASTContext.h:1004
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
Definition: DeclCXX.h:3255
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
Definition: CGCall.cpp:3138
const FunctionProtoType * getCalleeFunctionProtoType() const
Definition: CGCall.h:59
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
static RValue get(llvm::Value *V)
Definition: CGValue.h:86
bool isUnion() const
Definition: Decl.h:3159
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional, const FunctionDecl *FD)
Compute the arguments required by the given formal prototype, given that there may be some additional...
bool isPointerType() const
Definition: Type.h:5944
unsigned getNumRequiredArgs() const
unsigned getDirectOffset() const
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments...
Definition: CGCall.cpp:597
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CXXCtorType toCXXCtorType(StructorType T)
Definition: CodeGenTypes.h:65
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
QualType getType() const
Definition: Decl.h:639
static RValue getAggregate(Address addr, bool isVolatile=false)
Definition: CGValue.h:107
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
LValue - This represents an lvalue reference.
Definition: CGValue.h:167
An abstract representation of regular/ObjC call/message targets.
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:147
RValue asRValue() const
Definition: CGValue.h:571
llvm::Type * getCoerceToType() const
Notes how many arguments were added to the beginning (Prefix) and ending (Suffix) of an arg list...
Definition: CGCXXABI.h:300
unsigned getTargetAddressSpace(QualType T) const
Definition: ASTContext.h:2407
void AddDefaultFnAttrs(llvm::Function &F)
Adds attributes to F according to our CodeGenOptions and LangOptions, as though we had emitted it our...
Definition: CGCall.cpp:1777
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:182
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:662
Represents the canonical version of C arrays with a specified constant size.
Definition: Type.h:2620
Abstract information about a function or function prototype.
Definition: CGCall.h:44
A class which abstracts out some details necessary for making a call.
Definition: Type.h:3083
bool isScalar() const
Definition: CGValue.h:52
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::Instruction **callOrInvoke=nullptr)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
Definition: CGCall.cpp:3717
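An end-to-end sketch of the usual sequence (assumes a live CodeGenFunction &CGF, a FunctionDecl *FD for a free function, and a populated CallArgList Args): arrange the ABI information, wrap the callee, then emit the call.

  const clang::CodeGen::CGFunctionInfo &FI =
      CGF.CGM.getTypes().arrangeFreeFunctionCall(
          Args, FD->getType()->castAs<clang::FunctionType>(),
          /*ChainCall=*/false);
  llvm::Constant *FnPtr = CGF.CGM.GetAddrOfFunction(FD);
  clang::CodeGen::CGCallee Callee =
      clang::CodeGen::CGCallee::forDirect(FnPtr, FD);
  clang::CodeGen::RValue Result =
      CGF.EmitCall(FI, Callee, clang::CodeGen::ReturnValueSlot(), Args);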
This parameter (which must have pointer type) is a Swift indirect result parameter.
ConstructorUsingShadowDecl * getShadowDecl() const
Definition: De