//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}
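
// For illustration, a sketch of the mapping: a C declaration such as
//   void f(int) __attribute__((fastcall));
// reaches this switch as CC_X86FastCall and is emitted with the LLVM
// x86_fastcallcc calling convention on both the function and its call sites.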

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}
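
// For illustration: a K&R-style declaration such as
//   int f();
// is arranged with RequiredArgs(0), so every argument supplied at a call
// site is treated as variadic for ABI purposes.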

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}
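
// For illustration: given a prototype like
//   void f(void *p __attribute__((pass_object_size(0))));
// the single formal parameter 'p' lowers to two ABI-level arguments, the
// pointer itself plus an implicit size_t carrying the object size, which is
// why a getSizeType() entry is appended after each such parameter above.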

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}
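
// For illustration: on a Windows target __attribute__((ms_abi)) is already
// the default, so it maps back to plain CC_C, while __attribute__((sysv_abi))
// selects CC_X86_64SysV; the two attributes mirror each other on non-Windows
// targets.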

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(MD, Type, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}
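
// For illustration: under the Microsoft C++ ABI, HasThisReturn is true for
// constructors, so for a (hypothetical)
//   struct S { S(); };
// the constructor is arranged to return S* (argTypes.front(), the 'this'
// type) rather than void.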

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  llvm::SmallVector<CanQualType, 2> ArgTys;
  const FunctionProtoType *FTP = CD->getType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}
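
// For illustration: expanding a (hypothetical)
//   struct P { int x[2]; _Complex float c; };
// recurses through TEK_Record into TEK_ConstantArray for x (two pieces) and
// TEK_Complex for c (real + imaginary), so getExpansionSize returns 4.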

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
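
// For illustration: with SrcSTy = { { i32, i32 }, i8 } and DstSize = 8, the
// first iteration dives into the inner { i32, i32 } (its store size, 8, is
// not smaller than DstSize), and the recursion then stops because that
// struct's first element, i32, is smaller than the 8 bytes being accessed.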

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
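
// For illustration: coercing an i64 down to i32 on a big-endian target emits
//   %hi  = lshr i64 %val, 32
//   %res = trunc i64 %hi to i32
// keeping the bytes a memory store/load round-trip would keep, whereas a
// little-endian target just truncates and keeps the low 32 bits.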


/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}
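
// For illustration: storing an i64 coerce value into a 4-byte { float }
// destination takes the memory path above: the i64 is spilled to a temporary
// alloca and only the first DstSize (4) bytes are memcpy'd, dropping the
// upper bits as the function comment documents.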

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}
namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
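
// For illustration: for a function with an sret-indirect return and one
// Direct argument whose coerce type is the flattenable struct { i64, i64 },
// the mapping is IR arg 0 = sret pointer and IR args 1-2 = the two i64
// pieces, so totalIRArgs() == 3.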
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is alloca addr space.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo(
          CGM.getDataLayout().getAllocaAddrSpace());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}
1651 
1653  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1654  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1655 
1656  if (!isFuncTypeConvertible(FPT))
1657  return llvm::StructType::get(getLLVMContext());
1658 
1659  const CGFunctionInfo *Info;
1660  if (isa<CXXDestructorDecl>(MD))
1661  Info =
1663  else
1664  Info = &arrangeCXXMethodDeclaration(MD);
1665  return GetFunctionType(*Info);
1666 }
1667 
1668 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1669  llvm::AttrBuilder &FuncAttrs,
1670  const FunctionProtoType *FPT) {
1671  if (!FPT)
1672  return;
1673 
1674  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1675  FPT->isNothrow(Ctx))
1676  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1677 }
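// Illustrative sketch: a declaration such as 'void f() noexcept;' has a
// nothrow prototype, so this helper adds the IR-level 'nounwind' attribute,
// letting optimizations assume calls to it never unwind.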
1678 
1679 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1680  bool AttrOnCallSite,
1681  llvm::AttrBuilder &FuncAttrs) {
1682  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1683  if (!HasOptnone) {
1684  if (CodeGenOpts.OptimizeSize)
1685  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1686  if (CodeGenOpts.OptimizeSize == 2)
1687  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1688  }
1689 
1690  if (CodeGenOpts.DisableRedZone)
1691  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1692  if (CodeGenOpts.NoImplicitFloat)
1693  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1694 
1695  if (AttrOnCallSite) {
1696  // Attributes that should go on the call site only.
1697  if (!CodeGenOpts.SimplifyLibCalls ||
1698  CodeGenOpts.isNoBuiltinFunc(Name.data()))
1699  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1700  if (!CodeGenOpts.TrapFuncName.empty())
1701  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1702  } else {
1703  // Attributes that should go on the function, but not the call site.
1704  if (!CodeGenOpts.DisableFPElim) {
1705  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1706  } else if (CodeGenOpts.OmitLeafFramePointer) {
1707  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1708  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1709  } else {
1710  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1711  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1712  }
1713 
1714  FuncAttrs.addAttribute("less-precise-fpmad",
1715  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1716 
1717  if (!CodeGenOpts.FPDenormalMode.empty())
1718  FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1719 
1720  FuncAttrs.addAttribute("no-trapping-math",
1721  llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1722 
1723  // TODO: Are these all needed?
1724  // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1725  FuncAttrs.addAttribute("no-infs-fp-math",
1726  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1727  FuncAttrs.addAttribute("no-nans-fp-math",
1728  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1729  FuncAttrs.addAttribute("unsafe-fp-math",
1730  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1731  FuncAttrs.addAttribute("use-soft-float",
1732  llvm::toStringRef(CodeGenOpts.SoftFloat));
1733  FuncAttrs.addAttribute("stack-protector-buffer-size",
1734  llvm::utostr(CodeGenOpts.SSPBufferSize));
1735  FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1736  llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1737  FuncAttrs.addAttribute(
1738  "correctly-rounded-divide-sqrt-fp-math",
1739  llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1740 
1741  // TODO: Reciprocal estimate codegen options should apply to instructions?
1742  std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals;
1743  if (!Recips.empty())
1744  FuncAttrs.addAttribute("reciprocal-estimates",
1745  llvm::join(Recips.begin(), Recips.end(), ","));
1746 
1747  if (CodeGenOpts.StackRealignment)
1748  FuncAttrs.addAttribute("stackrealign");
1749  if (CodeGenOpts.Backchain)
1750  FuncAttrs.addAttribute("backchain");
1751  }
1752 
1753  if (getLangOpts().assumeFunctionsAreConvergent()) {
1754  // Conservatively, mark all functions and calls in CUDA and OpenCL as
1755  // convergent (meaning, they may call an intrinsically convergent op, such
1756  // as __syncthreads() / barrier(), and so can't have certain optimizations
1757  // applied around them). LLVM will remove this attribute where it safely
1758  // can.
1759  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1760  }
1761 
1762  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1763  // Exceptions aren't supported in CUDA device code.
1764  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1765 
1766  // Respect -fcuda-flush-denormals-to-zero.
1767  if (getLangOpts().CUDADeviceFlushDenormalsToZero)
1768  FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1769  }
1770 }
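// Illustrative sketch: compiling a plain C function with default options, the
// list built above typically contains string attributes along the lines of
//   "no-frame-pointer-elim"="true"  "stack-protector-buffer-size"="8"
//   "no-trapping-math"="false"      "less-precise-fpmad"="false"
// with the exact values tracking the CodeGenOpts in effect.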
1771 
1772 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1773  llvm::AttrBuilder FuncAttrs;
1774  ConstructDefaultFnAttrList(F.getName(),
1775  F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1776  /* AttrOnCallsite = */ false, FuncAttrs);
1777  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1778 }
1779 
1780 void CodeGenModule::ConstructAttributeList(
1781  StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1782  llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1783  llvm::AttrBuilder FuncAttrs;
1784  llvm::AttrBuilder RetAttrs;
1785 
1786  CallingConv = FI.getEffectiveCallingConvention();
1787  if (FI.isNoReturn())
1788  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1789 
1790  // If we have information about the function prototype, we can learn
1791  // attributes from there.
1792  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1793  CalleeInfo.getCalleeFunctionProtoType());
1794 
1795  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1796 
1797  bool HasOptnone = false;
1798  // FIXME: handle sseregparm someday...
1799  if (TargetDecl) {
1800  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1801  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1802  if (TargetDecl->hasAttr<NoThrowAttr>())
1803  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1804  if (TargetDecl->hasAttr<NoReturnAttr>())
1805  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1806  if (TargetDecl->hasAttr<ColdAttr>())
1807  FuncAttrs.addAttribute(llvm::Attribute::Cold);
1808  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1809  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1810  if (TargetDecl->hasAttr<ConvergentAttr>())
1811  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1812 
1813  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1814  AddAttributesFromFunctionProtoType(
1815  getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1816  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1817  // These attributes are not inherited by overloads.
1818  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1819  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1820  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1821  }
1822 
1823  // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1824  if (TargetDecl->hasAttr<ConstAttr>()) {
1825  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1826  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1827  } else if (TargetDecl->hasAttr<PureAttr>()) {
1828  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1829  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1830  } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1831  FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1832  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1833  }
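  // Illustrative sketch: '__attribute__((const)) int f(int);' lowers to
  // 'readnone nounwind' on the declaration and '__attribute__((pure))' to
  // 'readonly nounwind', which is what allows repeated calls to be CSE'd.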
1834  if (TargetDecl->hasAttr<RestrictAttr>())
1835  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1836  if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1837  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1838  if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1839  FuncAttrs.addAttribute("no_caller_saved_registers");
1840 
1841  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1842  if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1843  Optional<unsigned> NumElemsParam;
1844  // alloc_size args are base-1, 0 means not present.
1845  if (unsigned N = AllocSize->getNumElemsParam())
1846  NumElemsParam = N - 1;
1847  FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
1848  NumElemsParam);
1849  }
1850  }
1851 
1852  ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1853 
1854  if (CodeGenOpts.EnableSegmentedStacks &&
1855  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1856  FuncAttrs.addAttribute("split-stack");
1857 
1858  if (!AttrOnCallSite) {
1859  bool DisableTailCalls =
1860  CodeGenOpts.DisableTailCalls ||
1861  (TargetDecl && (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1862  TargetDecl->hasAttr<AnyX86InterruptAttr>()));
1863  FuncAttrs.addAttribute("disable-tail-calls",
1864  llvm::toStringRef(DisableTailCalls));
1865 
1866  // Add target-cpu and target-features attributes to functions. If
1867  // we have a decl for the function and it has a target attribute then
1868  // parse that and add it to the feature set.
1869  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1870  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
1871  if (FD && FD->hasAttr<TargetAttr>()) {
1872  llvm::StringMap<bool> FeatureMap;
1873  getFunctionFeatureMap(FeatureMap, FD);
1874 
1875  // Produce the canonical string for this set of features.
1876  std::vector<std::string> Features;
1877  for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
1878  ie = FeatureMap.end();
1879  it != ie; ++it)
1880  Features.push_back((it->second ? "+" : "-") + it->first().str());
1881 
1882  // Now add the target-cpu and target-features to the function.
1883  // While we populated the feature map above, we still need to
1884  // parse the target attribute so we can determine the CPU for
1885  // the function.
1886  const auto *TD = FD->getAttr<TargetAttr>();
1887  TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
1888  if (ParsedAttr.Architecture != "")
1889  TargetCPU = ParsedAttr.Architecture;
1890  if (TargetCPU != "")
1891  FuncAttrs.addAttribute("target-cpu", TargetCPU);
1892  if (!Features.empty()) {
1893  std::sort(Features.begin(), Features.end());
1894  FuncAttrs.addAttribute(
1895  "target-features",
1896  llvm::join(Features.begin(), Features.end(), ","));
1897  }
1898  } else {
1899  // Otherwise just add the existing target cpu and target features to the
1900  // function.
1901  std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
1902  if (TargetCPU != "")
1903  FuncAttrs.addAttribute("target-cpu", TargetCPU);
1904  if (!Features.empty()) {
1905  std::sort(Features.begin(), Features.end());
1906  FuncAttrs.addAttribute(
1907  "target-features",
1908  llvm::join(Features.begin(), Features.end(), ","));
1909  }
1910  }
1911  }
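  // Illustrative sketch: given
  //   __attribute__((target("sse4.2"))) void f(void);
  // the code above emits "target-cpu" plus a sorted feature string along the
  // lines of "target-features"="+sse4.2,..." on the function definition;
  // call sites deliberately do not carry these attributes.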
1912 
1913  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1914 
1915  QualType RetTy = FI.getReturnType();
1916  const ABIArgInfo &RetAI = FI.getReturnInfo();
1917  switch (RetAI.getKind()) {
1918  case ABIArgInfo::Extend:
1919  if (RetTy->hasSignedIntegerRepresentation())
1920  RetAttrs.addAttribute(llvm::Attribute::SExt);
1921  else if (RetTy->hasUnsignedIntegerRepresentation())
1922  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1923  // FALL THROUGH
1924  case ABIArgInfo::Direct:
1925  if (RetAI.getInReg())
1926  RetAttrs.addAttribute(llvm::Attribute::InReg);
1927  break;
1928  case ABIArgInfo::Ignore:
1929  break;
1930 
1931  case ABIArgInfo::InAlloca:
1932  case ABIArgInfo::Indirect: {
1933  // inalloca and sret disable readnone and readonly
1934  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1935  .removeAttribute(llvm::Attribute::ReadNone);
1936  break;
1937  }
1938 
1939  case ABIArgInfo::CoerceAndExpand:
1940  break;
1941 
1942  case ABIArgInfo::Expand:
1943  llvm_unreachable("Invalid ABI kind for return argument");
1944  }
1945 
1946  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1947  QualType PTy = RefTy->getPointeeType();
1948  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1949  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1950  .getQuantity());
1951  else if (getContext().getTargetAddressSpace(PTy) == 0)
1952  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1953  }
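  // Illustrative sketch: a function returning 'int &' gets its IR return
  // value annotated 'dereferenceable(4)' (the pointee size in bytes); when
  // the pointee size is unknown, an addrspace(0) reference is still marked
  // 'nonnull'.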
1954 
1955  bool hasUsedSRet = false;
1956  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
1957 
1958  // Attach attributes to sret.
1959  if (IRFunctionArgs.hasSRetArg()) {
1960  llvm::AttrBuilder SRETAttrs;
1961  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1962  hasUsedSRet = true;
1963  if (RetAI.getInReg())
1964  SRETAttrs.addAttribute(llvm::Attribute::InReg);
1965  ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
1966  llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
1967  }
1968 
1969  // Attach attributes to inalloca argument.
1970  if (IRFunctionArgs.hasInallocaArg()) {
1971  llvm::AttrBuilder Attrs;
1972  Attrs.addAttribute(llvm::Attribute::InAlloca);
1973  ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
1974  llvm::AttributeSet::get(getLLVMContext(), Attrs);
1975  }
1976 
1977  unsigned ArgNo = 0;
1978  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1979  E = FI.arg_end();
1980  I != E; ++I, ++ArgNo) {
1981  QualType ParamType = I->type;
1982  const ABIArgInfo &AI = I->info;
1983  llvm::AttrBuilder Attrs;
1984 
1985  // Add attribute for padding argument, if necessary.
1986  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1987  if (AI.getPaddingInReg()) {
1988  ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1989  llvm::AttributeSet::get(
1990  getLLVMContext(),
1991  llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
1992  }
1993  }
1994 
1995  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1996  // have the corresponding parameter variable. It doesn't make
1997  // sense to do it here because parameters are so messed up.
1998  switch (AI.getKind()) {
1999  case ABIArgInfo::Extend:
2000  if (ParamType->isSignedIntegerOrEnumerationType())
2001  Attrs.addAttribute(llvm::Attribute::SExt);
2002  else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
2003  if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
2004  Attrs.addAttribute(llvm::Attribute::SExt);
2005  else
2006  Attrs.addAttribute(llvm::Attribute::ZExt);
2007  }
2008  // FALL THROUGH
2009  case ABIArgInfo::Direct:
2010  if (ArgNo == 0 && FI.isChainCall())
2011  Attrs.addAttribute(llvm::Attribute::Nest);
2012  else if (AI.getInReg())
2013  Attrs.addAttribute(llvm::Attribute::InReg);
2014  break;
2015 
2016  case ABIArgInfo::Indirect: {
2017  if (AI.getInReg())
2018  Attrs.addAttribute(llvm::Attribute::InReg);
2019 
2020  if (AI.getIndirectByVal())
2021  Attrs.addAttribute(llvm::Attribute::ByVal);
2022 
2023  CharUnits Align = AI.getIndirectAlign();
2024 
2025  // In a byval argument, it is important that the required
2026  // alignment of the type is honored, as LLVM might be creating a
2027  // *new* stack object, and needs to know what alignment to give
2028  // it. (Sometimes it can deduce a sensible alignment on its own,
2029  // but not if clang decides it must emit a packed struct, or the
2030  // user specifies increased alignment requirements.)
2031  //
2032  // This is different from indirect *not* byval, where the object
2033  // exists already, and the align attribute is purely
2034  // informative.
2035  assert(!Align.isZero());
2036 
2037  // For now, only add this when we have a byval argument.
2038  // TODO: be less lazy about updating test cases.
2039  if (AI.getIndirectByVal())
2040  Attrs.addAlignmentAttr(Align.getQuantity());
2041 
2042  // byval disables readnone and readonly.
2043  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2044  .removeAttribute(llvm::Attribute::ReadNone);
2045  break;
2046  }
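  // Illustrative sketch: a large struct passed by value on 32-bit x86
  // typically shows up here as '%struct.S* byval align 4' -- the explicit
  // alignment matters because LLVM may create a brand-new stack copy for
  // the callee.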
2047  case ABIArgInfo::Ignore:
2048  case ABIArgInfo::Expand:
2049  case ABIArgInfo::CoerceAndExpand:
2050  break;
2051 
2052  case ABIArgInfo::InAlloca:
2053  // inalloca disables readnone and readonly.
2054  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2055  .removeAttribute(llvm::Attribute::ReadNone);
2056  continue;
2057  }
2058 
2059  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2060  QualType PTy = RefTy->getPointeeType();
2061  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2062  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2063  .getQuantity());
2064  else if (getContext().getTargetAddressSpace(PTy) == 0)
2065  Attrs.addAttribute(llvm::Attribute::NonNull);
2066  }
2067 
2068  switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2069  case ParameterABI::Ordinary:
2070  break;
2071 
2072  case ParameterABI::SwiftIndirectResult: {
2073  // Add 'sret' if we haven't already used it for something, but
2074  // only if the result is void.
2075  if (!hasUsedSRet && RetTy->isVoidType()) {
2076  Attrs.addAttribute(llvm::Attribute::StructRet);
2077  hasUsedSRet = true;
2078  }
2079 
2080  // Add 'noalias' in either case.
2081  Attrs.addAttribute(llvm::Attribute::NoAlias);
2082 
2083  // Add 'dereferenceable' and 'alignment'.
2084  auto PTy = ParamType->getPointeeType();
2085  if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2086  auto info = getContext().getTypeInfoInChars(PTy);
2087  Attrs.addDereferenceableAttr(info.first.getQuantity());
2088  Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2089  info.second.getQuantity()));
2090  }
2091  break;
2092  }
2093 
2094  case ParameterABI::SwiftErrorResult:
2095  Attrs.addAttribute(llvm::Attribute::SwiftError);
2096  break;
2097 
2098  case ParameterABI::SwiftContext:
2099  Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2100  break;
2101  }
2102 
2103  if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2104  Attrs.addAttribute(llvm::Attribute::NoCapture);
2105 
2106  if (Attrs.hasAttributes()) {
2107  unsigned FirstIRArg, NumIRArgs;
2108  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2109  for (unsigned i = 0; i < NumIRArgs; i++)
2110  ArgAttrs[FirstIRArg + i] =
2111  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2112  }
2113  }
2114  assert(ArgNo == FI.arg_size());
2115 
2116  AttrList = llvm::AttributeList::get(
2117  getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2118  llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2119 }
2120 
2121 /// An argument came in as a promoted argument; demote it back to its
2122 /// declared type.
2123 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2124  const VarDecl *var,
2125  llvm::Value *value) {
2126  llvm::Type *varType = CGF.ConvertType(var->getType());
2127 
2128  // This can happen with promotions that actually don't change the
2129  // underlying type, like the enum promotions.
2130  if (value->getType() == varType) return value;
2131 
2132  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2133  && "unexpected promotion type");
2134 
2135  if (isa<llvm::IntegerType>(varType))
2136  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2137 
2138  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2139 }
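// Illustrative sketch: in a K&R-style definition 'void f(c) char c; { ... }'
// the caller promotes 'c' to int, so the prolog receives an i32 and the
// trunc above recovers the declared i8; a promoted 'float' arrives as double
// and is demoted with an fptrunc.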
2140 
2141 /// Returns the attribute (either parameter attribute, or function
2142 /// attribute), which declares argument ArgNo to be non-null.
2143 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2144  QualType ArgType, unsigned ArgNo) {
2145  // FIXME: __attribute__((nonnull)) can also be applied to:
2146  // - references to pointers, where the pointee is known to be
2147  // nonnull (apparently a Clang extension)
2148  // - transparent unions containing pointers
2149  // In the former case, LLVM IR cannot represent the constraint. In
2150  // the latter case, we have no guarantee that the transparent union
2151  // is in fact passed as a pointer.
2152  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2153  return nullptr;
2154  // First, check attribute on parameter itself.
2155  if (PVD) {
2156  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2157  return ParmNNAttr;
2158  }
2159  // Check function attributes.
2160  if (!FD)
2161  return nullptr;
2162  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2163  if (NNAttr->isNonNull(ArgNo))
2164  return NNAttr;
2165  }
2166  return nullptr;
2167 }
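// Illustrative sketch of the two spellings this helper reconciles:
//   void f(int *p) __attribute__((nonnull(1)));  // on the function, base-1
//   void g(int *p __attribute__((nonnull)));     // on the parameter itself
// either form lets the prolog tag the corresponding IR argument 'nonnull'.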
2168 
2169 namespace {
2170  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2171  Address Temp;
2172  Address Arg;
2173  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2174  void Emit(CodeGenFunction &CGF, Flags flags) override {
2175  llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2176  CGF.Builder.CreateStore(errorValue, Arg);
2177  }
2178  };
2179 }
2180 
2181 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2182  llvm::Function *Fn,
2183  const FunctionArgList &Args) {
2184  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2185  // Naked functions don't have prologues.
2186  return;
2187 
2188  // If this is an implicit-return-zero function, go ahead and
2189  // initialize the return value. TODO: it might be nice to have
2190  // a more general mechanism for this that didn't require synthesized
2191  // return statements.
2192  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2193  if (FD->hasImplicitReturnZero()) {
2194  QualType RetTy = FD->getReturnType().getUnqualifiedType();
2195  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2196  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2197  Builder.CreateStore(Zero, ReturnValue);
2198  }
2199  }
2200 
2201  // FIXME: We no longer need the types from FunctionArgList; lift up and
2202  // simplify.
2203 
2204  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2205  // Flattened function arguments.
2206  SmallVector<llvm::Value *, 16> FnArgs;
2207  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2208  for (auto &Arg : Fn->args()) {
2209  FnArgs.push_back(&Arg);
2210  }
2211  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2212 
2213  // If we're using inalloca, all the memory arguments are GEPs off of the last
2214  // parameter, which is a pointer to the complete memory area.
2215  Address ArgStruct = Address::invalid();
2216  const llvm::StructLayout *ArgStructLayout = nullptr;
2217  if (IRFunctionArgs.hasInallocaArg()) {
2218  ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2219  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2220  FI.getArgStructAlignment());
2221 
2222  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2223  }
2224 
2225  // Name the struct return parameter.
2226  if (IRFunctionArgs.hasSRetArg()) {
2227  auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2228  AI->setName("agg.result");
2229  AI->addAttr(llvm::Attribute::NoAlias);
2230  }
2231 
2232  // Track if we received the parameter as a pointer (indirect, byval, or
2233  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2234  // into a local alloca for us.
2235  SmallVector<ParamValue, 16> ArgVals;
2236  ArgVals.reserve(Args.size());
2237 
2238  // Create a pointer value for every parameter declaration. This usually
2239  // entails copying one or more LLVM IR arguments into an alloca. Don't push
2240  // any cleanups or do anything that might unwind. We do that separately, so
2241  // we can push the cleanups in the correct order for the ABI.
2242  assert(FI.arg_size() == Args.size() &&
2243  "Mismatch between function signature & arguments.");
2244  unsigned ArgNo = 0;
2245  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2246  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2247  i != e; ++i, ++info_it, ++ArgNo) {
2248  const VarDecl *Arg = *i;
2249  QualType Ty = info_it->type;
2250  const ABIArgInfo &ArgI = info_it->info;
2251 
2252  bool isPromoted =
2253  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2254 
2255  unsigned FirstIRArg, NumIRArgs;
2256  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2257 
2258  switch (ArgI.getKind()) {
2259  case ABIArgInfo::InAlloca: {
2260  assert(NumIRArgs == 0);
2261  auto FieldIndex = ArgI.getInAllocaFieldIndex();
2262  CharUnits FieldOffset =
2263  CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2264  Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2265  Arg->getName());
2266  ArgVals.push_back(ParamValue::forIndirect(V));
2267  break;
2268  }
2269 
2270  case ABIArgInfo::Indirect: {
2271  assert(NumIRArgs == 1);
2272  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2273 
2274  if (!hasScalarEvaluationKind(Ty)) {
2275  // Aggregates and complex variables are accessed by reference. All we
2276  // need to do is realign the value, if requested.
2277  Address V = ParamAddr;
2278  if (ArgI.getIndirectRealign()) {
2279  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2280 
2281  // Copy from the incoming argument pointer to the temporary with the
2282  // appropriate alignment.
2283  //
2284  // FIXME: We should have a common utility for generating an aggregate
2285  // copy.
2286  CharUnits Size = getContext().getTypeSizeInChars(Ty);
2287  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2288  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2289  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2290  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2291  V = AlignedTemp;
2292  }
2293  ArgVals.push_back(ParamValue::forIndirect(V));
2294  } else {
2295  // Load scalar value from indirect argument.
2296  llvm::Value *V =
2297  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2298 
2299  if (isPromoted)
2300  V = emitArgumentDemotion(*this, Arg, V);
2301  ArgVals.push_back(ParamValue::forDirect(V));
2302  }
2303  break;
2304  }
2305 
2306  case ABIArgInfo::Extend:
2307  case ABIArgInfo::Direct: {
2308 
2309  // If we have the trivial case, handle it with no muss and fuss.
2310  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2311  ArgI.getCoerceToType() == ConvertType(Ty) &&
2312  ArgI.getDirectOffset() == 0) {
2313  assert(NumIRArgs == 1);
2314  llvm::Value *V = FnArgs[FirstIRArg];
2315  auto AI = cast<llvm::Argument>(V);
2316 
2317  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2318  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2319  PVD->getFunctionScopeIndex()))
2320  AI->addAttr(llvm::Attribute::NonNull);
2321 
2322  QualType OTy = PVD->getOriginalType();
2323  if (const auto *ArrTy =
2324  getContext().getAsConstantArrayType(OTy)) {
2325  // A C99 array parameter declaration with the static keyword also
2326  // indicates dereferenceability, and if the size is constant we can
2327  // use the dereferenceable attribute (which requires the size in
2328  // bytes).
2329  if (ArrTy->getSizeModifier() == ArrayType::Static) {
2330  QualType ETy = ArrTy->getElementType();
2331  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2332  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2333  ArrSize) {
2334  llvm::AttrBuilder Attrs;
2335  Attrs.addDereferenceableAttr(
2336  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2337  AI->addAttrs(Attrs);
2338  } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2339  AI->addAttr(llvm::Attribute::NonNull);
2340  }
2341  }
2342  } else if (const auto *ArrTy =
2343  getContext().getAsVariableArrayType(OTy)) {
2344  // For C99 VLAs with the static keyword, we don't know the size so
2345  // we can't use the dereferenceable attribute, but in addrspace(0)
2346  // we know that it must be nonnull.
2347  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2348  !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2349  AI->addAttr(llvm::Attribute::NonNull);
2350  }
2351 
2352  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2353  if (!AVAttr)
2354  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2355  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2356  if (AVAttr) {
2357  llvm::Value *AlignmentValue =
2358  EmitScalarExpr(AVAttr->getAlignment());
2359  llvm::ConstantInt *AlignmentCI =
2360  cast<llvm::ConstantInt>(AlignmentValue);
2361  unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2362  +llvm::Value::MaximumAlignment);
2363  AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2364  }
2365  }
2366 
2367  if (Arg->getType().isRestrictQualified())
2368  AI->addAttr(llvm::Attribute::NoAlias);
2369 
2370  // LLVM expects swifterror parameters to be used in very restricted
2371  // ways. Copy the value into a less-restricted temporary.
2372  if (FI.getExtParameterInfo(ArgNo).getABI()
2373  == ParameterABI::SwiftErrorResult) {
2374  QualType pointeeTy = Ty->getPointeeType();
2375  assert(pointeeTy->isPointerType());
2376  Address temp =
2377  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2378  Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2379  llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2380  Builder.CreateStore(incomingErrorValue, temp);
2381  V = temp.getPointer();
2382 
2383  // Push a cleanup to copy the value back at the end of the function.
2384  // The convention does not guarantee that the value will be written
2385  // back if the function exits with an unwind exception.
2386  EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2387  }
2388 
2389  // Ensure the argument is the correct type.
2390  if (V->getType() != ArgI.getCoerceToType())
2391  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2392 
2393  if (isPromoted)
2394  V = emitArgumentDemotion(*this, Arg, V);
2395 
2396  // Because of merging of function types from multiple decls it is
2397  // possible for the type of an argument to not match the corresponding
2398  // type in the function type. Since we are codegening the callee
2399  // in here, add a cast to the argument type.
2400  llvm::Type *LTy = ConvertType(Arg->getType());
2401  if (V->getType() != LTy)
2402  V = Builder.CreateBitCast(V, LTy);
2403 
2404  ArgVals.push_back(ParamValue::forDirect(V));
2405  break;
2406  }
2407 
2408  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2409  Arg->getName());
2410 
2411  // Pointer to store into.
2412  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2413 
2414  // Fast-isel and the optimizer generally like scalar values better than
2415  // FCAs, so we flatten them if this is safe to do for this argument.
2416  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2417  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2418  STy->getNumElements() > 1) {
2419  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2420  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2421  llvm::Type *DstTy = Ptr.getElementType();
2422  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2423 
2424  Address AddrToStoreInto = Address::invalid();
2425  if (SrcSize <= DstSize) {
2426  AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2427  } else {
2428  AddrToStoreInto =
2429  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2430  }
2431 
2432  assert(STy->getNumElements() == NumIRArgs);
2433  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2434  auto AI = FnArgs[FirstIRArg + i];
2435  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2436  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2437  Address EltPtr =
2438  Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2439  Builder.CreateStore(AI, EltPtr);
2440  }
2441 
2442  if (SrcSize > DstSize) {
2443  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2444  }
2445 
2446  } else {
2447  // Simple case, just do a coerced store of the argument into the alloca.
2448  assert(NumIRArgs == 1);
2449  auto AI = FnArgs[FirstIRArg];
2450  AI->setName(Arg->getName() + ".coerce");
2451  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2452  }
2453 
2454  // Match to what EmitParmDecl is expecting for this type.
2455  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2456  llvm::Value *V =
2457  EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2458  if (isPromoted)
2459  V = emitArgumentDemotion(*this, Arg, V);
2460  ArgVals.push_back(ParamValue::forDirect(V));
2461  } else {
2462  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2463  }
2464  break;
2465  }
2466 
2467  case ABIArgInfo::CoerceAndExpand: {
2468  // Reconstruct into a temporary.
2469  Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2470  ArgVals.push_back(ParamValue::forIndirect(alloca));
2471 
2472  auto coercionType = ArgI.getCoerceAndExpandType();
2473  alloca = Builder.CreateElementBitCast(alloca, coercionType);
2474  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2475 
2476  unsigned argIndex = FirstIRArg;
2477  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2478  llvm::Type *eltType = coercionType->getElementType(i);
2479  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2480  continue;
2481 
2482  auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2483  auto elt = FnArgs[argIndex++];
2484  Builder.CreateStore(elt, eltAddr);
2485  }
2486  assert(argIndex == FirstIRArg + NumIRArgs);
2487  break;
2488  }
2489 
2490  case ABIArgInfo::Expand: {
2491  // If this structure was expanded into multiple arguments then
2492  // we need to create a temporary and reconstruct it from the
2493  // arguments.
2494  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2495  LValue LV = MakeAddrLValue(Alloca, Ty);
2496  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2497 
2498  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2499  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2500  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2501  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2502  auto AI = FnArgs[FirstIRArg + i];
2503  AI->setName(Arg->getName() + "." + Twine(i));
2504  }
2505  break;
2506  }
2507 
2508  case ABIArgInfo::Ignore:
2509  assert(NumIRArgs == 0);
2510  // Initialize the local variable appropriately.
2511  if (!hasScalarEvaluationKind(Ty)) {
2512  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2513  } else {
2514  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2515  ArgVals.push_back(ParamValue::forDirect(U));
2516  }
2517  break;
2518  }
2519  }
2520 
2521  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2522  for (int I = Args.size() - 1; I >= 0; --I)
2523  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2524  } else {
2525  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2526  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2527  }
2528 }
2529 
2530 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2531  while (insn->use_empty()) {
2532  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2533  if (!bitcast) return;
2534 
2535  // This is "safe" because we would have used a ConstantExpr otherwise.
2536  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2537  bitcast->eraseFromParent();
2538  }
2539 }
2540 
2541 /// Try to emit a fused autorelease of a return result.
2542 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2543  llvm::Value *result) {
2544  // We must immediately follow the cast.
2545  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2546  if (BB->empty()) return nullptr;
2547  if (&BB->back() != result) return nullptr;
2548 
2549  llvm::Type *resultType = result->getType();
2550 
2551  // result is in a BasicBlock and is therefore an Instruction.
2552  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2553 
2554  SmallVector<llvm::Instruction *, 4> InstsToKill;
2555 
2556  // Look for:
2557  // %generator = bitcast %type1* %generator2 to %type2*
2558  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2559  // We would have emitted this as a constant if the operand weren't
2560  // an Instruction.
2561  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2562 
2563  // Require the generator to be immediately followed by the cast.
2564  if (generator->getNextNode() != bitcast)
2565  return nullptr;
2566 
2567  InstsToKill.push_back(bitcast);
2568  }
2569 
2570  // Look for:
2571  // %generator = call i8* @objc_retain(i8* %originalResult)
2572  // or
2573  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2574  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2575  if (!call) return nullptr;
2576 
2577  bool doRetainAutorelease;
2578 
2579  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2580  doRetainAutorelease = true;
2581  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2582  .objc_retainAutoreleasedReturnValue) {
2583  doRetainAutorelease = false;
2584 
2585  // If we emitted an assembly marker for this call (and the
2586  // ARCEntrypoints field should have been set if so), go looking
2587  // for that call. If we can't find it, we can't do this
2588  // optimization. But it should always be the immediately previous
2589  // instruction, unless we needed bitcasts around the call.
2590  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2591  llvm::Instruction *prev = call->getPrevNode();
2592  assert(prev);
2593  if (isa<llvm::BitCastInst>(prev)) {
2594  prev = prev->getPrevNode();
2595  assert(prev);
2596  }
2597  assert(isa<llvm::CallInst>(prev));
2598  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2599  CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2600  InstsToKill.push_back(prev);
2601  }
2602  } else {
2603  return nullptr;
2604  }
2605 
2606  result = call->getArgOperand(0);
2607  InstsToKill.push_back(call);
2608 
2609  // Keep killing bitcasts, for sanity. Note that we no longer care
2610  // about precise ordering as long as there's exactly one use.
2611  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2612  if (!bitcast->hasOneUse()) break;
2613  InstsToKill.push_back(bitcast);
2614  result = bitcast->getOperand(0);
2615  }
2616 
2617  // Delete all the unnecessary instructions, from latest to earliest.
2618  for (auto *I : InstsToKill)
2619  I->eraseFromParent();
2620 
2621  // Do the fused retain/autorelease if we were asked to.
2622  if (doRetainAutorelease)
2623  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2624 
2625  // Cast back to the result type.
2626  return CGF.Builder.CreateBitCast(result, resultType);
2627 }
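// Illustrative sketch of the rewrite performed above (ARC, -O0):
//   %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
//   ret i8* %1   ; result is about to be autoreleased on return
// becomes a single call to @objc_retainAutoreleaseReturnValue, collapsing
// the retain/autorelease pair on the return path.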
2628 
2629 /// If this is a +1 of the value of an immutable 'self', remove it.
2630 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2631  llvm::Value *result) {
2632  // This is only applicable to a method with an immutable 'self'.
2633  const ObjCMethodDecl *method =
2634  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2635  if (!method) return nullptr;
2636  const VarDecl *self = method->getSelfDecl();
2637  if (!self->getType().isConstQualified()) return nullptr;
2638 
2639  // Look for a retain call.
2640  llvm::CallInst *retainCall =
2641  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2642  if (!retainCall ||
2643  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2644  return nullptr;
2645 
2646  // Look for an ordinary load of 'self'.
2647  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2648  llvm::LoadInst *load =
2649  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2650  if (!load || load->isAtomic() || load->isVolatile() ||
2651  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2652  return nullptr;
2653 
2654  // Okay! Burn it all down. This relies for correctness on the
2655  // assumption that the retain is emitted as part of the return and
2656  // that thereafter everything is used "linearly".
2657  llvm::Type *resultType = result->getType();
2658  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2659  assert(retainCall->use_empty());
2660  retainCall->eraseFromParent();
2661  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2662 
2663  return CGF.Builder.CreateBitCast(load, resultType);
2664 }
2665 
2666 /// Emit an ARC autorelease of the result of a function.
2667 ///
2668 /// \return the value to actually return from the function
2669 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2670  llvm::Value *result) {
2671  // If we're returning 'self', kill the initial retain. This is a
2672  // heuristic attempt to "encourage correctness" in the really unfortunate
2673  // case where we have a return of self during a dealloc and we desperately
2674  // need to avoid the possible autorelease.
2675  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2676  return self;
2677 
2678  // At -O0, try to emit a fused retain/autorelease.
2679  if (CGF.shouldUseFusedARCCalls())
2680  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2681  return fused;
2682 
2683  return CGF.EmitARCAutoreleaseReturnValue(result);
2684 }
2685 
2686 /// Heuristically search for a dominating store to the return-value slot.
2687 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2688  // Check if a User is a store whose pointer operand is the ReturnValue.
2689  // We are looking for stores to the ReturnValue, not for stores of the
2690  // ReturnValue to some other location.
2691  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2692  auto *SI = dyn_cast<llvm::StoreInst>(U);
2693  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2694  return nullptr;
2695  // These aren't actually possible for non-coerced returns, and we
2696  // only care about non-coerced returns on this code path.
2697  assert(!SI->isAtomic() && !SI->isVolatile());
2698  return SI;
2699  };
2700  // If there are multiple uses of the return-value slot, just check
2701  // for something immediately preceding the IP. Sometimes this can
2702  // happen with how we generate implicit-returns; it can also happen
2703  // with noreturn cleanups.
2704  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2705  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2706  if (IP->empty()) return nullptr;
2707  llvm::Instruction *I = &IP->back();
2708 
2709  // Skip lifetime markers
2710  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2711  IE = IP->rend();
2712  II != IE; ++II) {
2713  if (llvm::IntrinsicInst *Intrinsic =
2714  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2715  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2716  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2717  ++II;
2718  if (II == IE)
2719  break;
2720  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2721  continue;
2722  }
2723  }
2724  I = &*II;
2725  break;
2726  }
2727 
2728  return GetStoreIfValid(I);
2729  }
2730 
2731  llvm::StoreInst *store =
2732  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2733  if (!store) return nullptr;
2734 
2735  // Now do a quick-and-dirty dominance check: just walk up the
2736  // single-predecessors chain from the current insertion point.
2737  llvm::BasicBlock *StoreBB = store->getParent();
2738  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2739  while (IP != StoreBB) {
2740  if (!(IP = IP->getSinglePredecessor()))
2741  return nullptr;
2742  }
2743 
2744  // Okay, the store's basic block dominates the insertion point; we
2745  // can do our thing.
2746  return store;
2747 }
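// Illustrative sketch of the epilogue pattern this search enables:
//   store i32 %x, i32* %retval
//   ...              ; store's block dominates the insertion point
//   %v = load i32, i32* %retval
//   ret i32 %v
// the load is elided, the store deleted, and 'ret i32 %x' emitted directly;
// if nothing else uses %retval, its alloca disappears too.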
2748 
2749 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2750  bool EmitRetDbgLoc,
2751  SourceLocation EndLoc) {
2752  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2753  // Naked functions don't have epilogues.
2754  Builder.CreateUnreachable();
2755  return;
2756  }
2757 
2758  // Functions with no result always return void.
2759  if (!ReturnValue.isValid()) {
2760  Builder.CreateRetVoid();
2761  return;
2762  }
2763 
2764  llvm::DebugLoc RetDbgLoc;
2765  llvm::Value *RV = nullptr;
2766  QualType RetTy = FI.getReturnType();
2767  const ABIArgInfo &RetAI = FI.getReturnInfo();
2768 
2769  switch (RetAI.getKind()) {
2770  case ABIArgInfo::InAlloca:
2771  // Aggregates get evaluated directly into the destination. Sometimes we
2772  // need to return the sret value in a register, though.
2773  assert(hasAggregateEvaluationKind(RetTy));
2774  if (RetAI.getInAllocaSRet()) {
2775  llvm::Function::arg_iterator EI = CurFn->arg_end();
2776  --EI;
2777  llvm::Value *ArgStruct = &*EI;
2778  llvm::Value *SRet = Builder.CreateStructGEP(
2779  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2780  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2781  }
2782  break;
2783 
2784  case ABIArgInfo::Indirect: {
2785  auto AI = CurFn->arg_begin();
2786  if (RetAI.isSRetAfterThis())
2787  ++AI;
2788  switch (getEvaluationKind(RetTy)) {
2789  case TEK_Complex: {
2790  ComplexPairTy RT =
2791  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2792  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2793  /*isInit*/ true);
2794  break;
2795  }
2796  case TEK_Aggregate:
2797  // Do nothing; aggregates get evaluated directly into the destination.
2798  break;
2799  case TEK_Scalar:
2800  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2801  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2802  /*isInit*/ true);
2803  break;
2804  }
2805  break;
2806  }
2807 
2808  case ABIArgInfo::Extend:
2809  case ABIArgInfo::Direct:
2810  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2811  RetAI.getDirectOffset() == 0) {
2812  // The internal return value temp always will have pointer-to-return-type
2813  // type, just do a load.
2814 
2815  // If there is a dominating store to ReturnValue, we can elide
2816  // the load, zap the store, and usually zap the alloca.
2817  if (llvm::StoreInst *SI =
2818  findDominatingStoreToReturnValue(*this)) {
2819  // Reuse the debug location from the store unless there is
2820  // cleanup code to be emitted between the store and return
2821  // instruction.
2822  if (EmitRetDbgLoc && !AutoreleaseResult)
2823  RetDbgLoc = SI->getDebugLoc();
2824  // Get the stored value and nuke the now-dead store.
2825  RV = SI->getValueOperand();
2826  SI->eraseFromParent();
2827 
2828  // If that was the only use of the return value, nuke it as well now.
2829  auto returnValueInst = ReturnValue.getPointer();
2830  if (returnValueInst->use_empty()) {
2831  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2832  alloca->eraseFromParent();
2833  ReturnValue = Address::invalid();
2834  }
2835  }
2836 
2837  // Otherwise, we have to do a simple load.
2838  } else {
2839  RV = Builder.CreateLoad(ReturnValue);
2840  }
2841  } else {
2842  // If the value is offset in memory, apply the offset now.
2843  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2844 
2845  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2846  }
2847 
2848  // In ARC, end functions that return a retainable type with a call
2849  // to objc_autoreleaseReturnValue.
2850  if (AutoreleaseResult) {
2851 #ifndef NDEBUG
2852  // Type::isObjCRetainableType has to be called on a QualType that hasn't
2853  // been stripped of the typedefs, so we cannot use RetTy here. Get the
2854  // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
2855  // from CurCodeDecl or BlockInfo.
2856  QualType RT;
2857 
2858  if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2859  RT = FD->getReturnType();
2860  else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2861  RT = MD->getReturnType();
2862  else if (isa<BlockDecl>(CurCodeDecl))
2863  RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2864  else
2865  llvm_unreachable("Unexpected function/method type");
2866 
2867  assert(getLangOpts().ObjCAutoRefCount &&
2868  !FI.isReturnsRetained() &&
2869  RT->isObjCRetainableType());
2870 #endif
2871  RV = emitAutoreleaseOfResult(*this, RV);
2872  }
2873 
2874  break;
2875 
2876  case ABIArgInfo::Ignore:
2877  break;
2878 
2879  case ABIArgInfo::CoerceAndExpand: {
2880  auto coercionType = RetAI.getCoerceAndExpandType();
2881  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2882 
2883  // Load all of the coerced elements out into results.
2884  llvm::SmallVector<llvm::Value*, 4> results;
2885  Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2886  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2887  auto coercedEltType = coercionType->getElementType(i);
2888  if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2889  continue;
2890 
2891  auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2892  auto elt = Builder.CreateLoad(eltAddr);
2893  results.push_back(elt);
2894  }
2895 
2896  // If we have one result, it's the single direct result type.
2897  if (results.size() == 1) {
2898  RV = results[0];
2899 
2900  // Otherwise, we need to make a first-class aggregate.
2901  } else {
2902  // Construct a return type that lacks padding elements.
2903  llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2904 
2905  RV = llvm::UndefValue::get(returnType);
2906  for (unsigned i = 0, e = results.size(); i != e; ++i) {
2907  RV = Builder.CreateInsertValue(RV, results[i], i);
2908  }
2909  }
2910  break;
2911  }
2912 
2913  case ABIArgInfo::Expand:
2914  llvm_unreachable("Invalid ABI kind for return argument");
2915  }
2916 
2917  llvm::Instruction *Ret;
2918  if (RV) {
2919  EmitReturnValueCheck(RV);
2920  Ret = Builder.CreateRet(RV);
2921  } else {
2922  Ret = Builder.CreateRetVoid();
2923  }
2924 
2925  if (RetDbgLoc)
2926  Ret->setDebugLoc(std::move(RetDbgLoc));
2927 }
2928 
2929 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2930  // A current decl may not be available when emitting vtable thunks.
2931  if (!CurCodeDecl)
2932  return;
2933 
2934  ReturnsNonNullAttr *RetNNAttr = nullptr;
2935  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2936  RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2937 
2938  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2939  return;
2940 
2941  // Prefer the returns_nonnull attribute if it's present.
2942  SourceLocation AttrLoc;
2943  SanitizerMask CheckKind;
2944  SanitizerHandler Handler;
2945  if (RetNNAttr) {
2946  assert(!requiresReturnValueNullabilityCheck() &&
2947  "Cannot check nullability and the nonnull attribute");
2948  AttrLoc = RetNNAttr->getLocation();
2949  CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2950  Handler = SanitizerHandler::NonnullReturn;
2951  } else {
2952  if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
2953  if (auto *TSI = DD->getTypeSourceInfo())
2954  if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
2955  AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
2956  CheckKind = SanitizerKind::NullabilityReturn;
2957  Handler = SanitizerHandler::NullabilityReturn;
2958  }
2959 
2960  SanitizerScope SanScope(this);
2961 
2962  // Make sure the "return" source location is valid. If we're checking a
2963  // nullability annotation, make sure the preconditions for the check are met.
2964  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
2965  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
2966  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
2967  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
2968  if (requiresReturnValueNullabilityCheck())
2969  CanNullCheck =
2970  Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
2971  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
2972  EmitBlock(Check);
2973 
2974  // Now do the null check.
2975  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
2976  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
2977  llvm::Value *DynamicData[] = {SLocPtr};
2978  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
2979 
2980  EmitBlock(NoCheck);
2981 
2982 #ifndef NDEBUG
2983  // The return location should not be used after the check has been emitted.
2984  ReturnLocation = Address::invalid();
2985 #endif
2986 }
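// Illustrative sketch: under -fsanitize=nullability-return, a function typed
// 'id _Nonnull f()' (or a returns_nonnull C function) grows an epilogue
// branch of roughly 'br i1 (RV != null), cont, handler', guarded by the
// stored return-statement location so only genuine 'return's are checked.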
2987 
2988 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2989  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2990  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2991 }
2992 
2993 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
2994  QualType Ty) {
2995  // FIXME: Generate IR in one pass, rather than going back and fixing up these
2996  // placeholders.
2997  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2998  llvm::Type *IRPtrTy = IRTy->getPointerTo();
2999  llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3000 
3001  // FIXME: When we generate this IR in one pass, we shouldn't need
3002  // this win32-specific alignment hack.
3004  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3005 
3006  return AggValueSlot::forAddr(Address(Placeholder, Align),
3007  Ty.getQualifiers(),
3008  AggValueSlot::IsNotDestructed,
3009  AggValueSlot::DoesNotNeedGCBarriers,
3010  AggValueSlot::IsNotAliased);
3011 }
3012 
3013 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3014  const VarDecl *param,
3015  SourceLocation loc) {
3016  // StartFunction converted the ABI-lowered parameter(s) into a
3017  // local alloca. We need to turn that into an r-value suitable
3018  // for EmitCall.
3019  Address local = GetAddrOfLocalVar(param);
3020 
3021  QualType type = param->getType();
3022 
3023  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3024  "cannot emit delegate call arguments for inalloca arguments!");
3025 
3026  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3027  // but the argument needs to be the original pointer.
3028  if (type->isReferenceType()) {
3029  args.add(RValue::get(Builder.CreateLoad(local)), type);
3030 
3031  // In ARC, move out of consumed arguments so that the release cleanup
3032  // entered by StartFunction doesn't cause an over-release. This isn't
3033  // optimal -O0 code generation, but it should get cleaned up when
3034  // optimization is enabled. This also assumes that delegate calls are
3035  // performed exactly once for a set of arguments, but that should be safe.
3036  } else if (getLangOpts().ObjCAutoRefCount &&
3037  param->hasAttr<NSConsumedAttr>() &&
3038  type->isObjCRetainableType()) {
3039  llvm::Value *ptr = Builder.CreateLoad(local);
3040  auto null =
3041  llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3042  Builder.CreateStore(null, local);
3043  args.add(RValue::get(ptr), type);
3044 
3045  // For the most part, we just need to load the alloca, except that
3046  // aggregate r-values are actually pointers to temporaries.
3047  } else {
3048  args.add(convertTempToRValue(local, type, loc), type);
3049  }
3050 }
3051 
3052 static bool isProvablyNull(llvm::Value *addr) {
3053  return isa<llvm::ConstantPointerNull>(addr);
3054 }
3055 
3056 /// Emit the actual writing-back of a writeback.
3057 static void emitWriteback(CodeGenFunction &CGF,
3058  const CallArgList::Writeback &writeback) {
3059  const LValue &srcLV = writeback.Source;
3060  Address srcAddr = srcLV.getAddress();
3061  assert(!isProvablyNull(srcAddr.getPointer()) &&
3062  "shouldn't have writeback for provably null argument");
3063 
3064  llvm::BasicBlock *contBB = nullptr;
3065 
3066  // If the argument wasn't provably non-null, we need to null check
3067  // before doing the store.
3068  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3069  CGF.CGM.getDataLayout());
3070  if (!provablyNonNull) {
3071  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3072  contBB = CGF.createBasicBlock("icr.done");
3073 
3074  llvm::Value *isNull =
3075  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3076  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3077  CGF.EmitBlock(writebackBB);
3078  }
3079 
3080  // Load the value to writeback.
3081  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3082 
3083  // Cast it back, in case we're writing an id to a Foo* or something.
3084  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3085  "icr.writeback-cast");
3086 
3087  // Perform the writeback.
3088 
3089  // If we have a "to use" value, it's something we need to emit a use
3090  // of. This has to be carefully threaded in: if it's done after the
3091  // release it's potentially undefined behavior (and the optimizer
3092  // will ignore it), and if it happens before the retain then the
3093  // optimizer could move the release there.
3094  if (writeback.ToUse) {
3095  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3096 
3097  // Retain the new value. No need to block-copy here: the block's
3098  // being passed up the stack.
3099  value = CGF.EmitARCRetainNonBlock(value);
3100 
3101  // Emit the intrinsic use here.
3102  CGF.EmitARCIntrinsicUse(writeback.ToUse);
3103 
3104  // Load the old value (primitively).
3105  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3106 
3107  // Put the new value in place (primitively).
3108  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3109 
3110  // Release the old value.
3111  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3112 
3113  // Otherwise, we can just do a normal lvalue store.
3114  } else {
3115  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3116  }
3117 
3118  // Jump to the continuation block.
3119  if (!provablyNonNull)
3120  CGF.EmitBlock(contBB);
3121 }
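// Illustrative sketch of the ObjC pattern handled above:
//   NSError *err;          // __strong local
//   [obj doIt:&err];       // parameter is 'NSError *__autoreleasing *'
// the call really receives the address of a temporary; this writeback copies
// the temporary's final value back into 'err' afterwards, null-checking the
// original address unless it is provably non-null.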
3122 
3123 static void emitWritebacks(CodeGenFunction &CGF,
3124  const CallArgList &args) {
3125  for (const auto &I : args.writebacks())
3126  emitWriteback(CGF, I);
3127 }
3128 
3129 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3130  const CallArgList &CallArgs) {
3132  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3133  CallArgs.getCleanupsToDeactivate();
3134  // Iterate in reverse to increase the likelihood of popping the cleanup.
3135  for (const auto &I : llvm::reverse(Cleanups)) {
3136  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3137  I.IsActiveIP->eraseFromParent();
3138  }
3139 }
3140 
3141 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3142  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3143  if (uop->getOpcode() == UO_AddrOf)
3144  return uop->getSubExpr();
3145  return nullptr;
3146 }
3147 
3148 /// Emit an argument that's being passed call-by-writeback. That is,
3149 /// we are passing the address of an __autoreleased temporary; it
3150 /// might be copy-initialized with the current value of the given
3151 /// address, but it will definitely be copied out of after the call.
3152 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3153  const ObjCIndirectCopyRestoreExpr *CRE) {
3154  LValue srcLV;
3155 
3156  // Make an optimistic effort to emit the address as an l-value.
3157  // This can fail if the argument expression is more complicated.
3158  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3159  srcLV = CGF.EmitLValue(lvExpr);
3160 
3161  // Otherwise, just emit it as a scalar.
3162  } else {
3163  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3164 
3165  QualType srcAddrType =
3166  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3167  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3168  }
3169  Address srcAddr = srcLV.getAddress();
3170 
3171  // The dest and src types don't necessarily match in LLVM terms
3172  // because of the crazy ObjC compatibility rules.
3173 
3174  llvm::PointerType *destType =
3175  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3176 
3177  // If the address is a constant null, just pass the appropriate null.
3178  if (isProvablyNull(srcAddr.getPointer())) {
3179  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3180  CRE->getType());
3181  return;
3182  }
3183 
3184  // Create the temporary.
3185  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3186  CGF.getPointerAlign(),
3187  "icr.temp");
3188  // Loading an l-value can introduce a cleanup if the l-value is __weak,
3189  // and that cleanup will be conditional if we can't prove that the l-value
3190  // isn't null, so we need to register a dominating point so that the cleanups
3191  // system will make valid IR.
3192  CodeGenFunction::ConditionalEvaluation condEval(CGF);
3193 
3194  // Zero-initialize it if we're not doing a copy-initialization.
3195  bool shouldCopy = CRE->shouldCopy();
3196  if (!shouldCopy) {
3197  llvm::Value *null =
3198  llvm::ConstantPointerNull::get(
3199  cast<llvm::PointerType>(destType->getElementType()));
3200  CGF.Builder.CreateStore(null, temp);
3201  }
3202 
3203  llvm::BasicBlock *contBB = nullptr;
3204  llvm::BasicBlock *originBB = nullptr;
3205 
3206  // If the address is *not* known to be non-null, we need to switch.
3207  llvm::Value *finalArgument;
3208 
3209  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3210  CGF.CGM.getDataLayout());
3211  if (provablyNonNull) {
3212  finalArgument = temp.getPointer();
3213  } else {
3214  llvm::Value *isNull =
3215  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3216 
3217  finalArgument = CGF.Builder.CreateSelect(isNull,
3218  llvm::ConstantPointerNull::get(destType),
3219  temp.getPointer(), "icr.argument");
3220 
3221  // If we need to copy, then the load has to be conditional, which
3222  // means we need control flow.
3223  if (shouldCopy) {
3224  originBB = CGF.Builder.GetInsertBlock();
3225  contBB = CGF.createBasicBlock("icr.cont");
3226  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3227  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3228  CGF.EmitBlock(copyBB);
3229  condEval.begin(CGF);
3230  }
3231  }
3232 
3233  llvm::Value *valueToUse = nullptr;
3234 
3235  // Perform a copy if necessary.
3236  if (shouldCopy) {
3237  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3238  assert(srcRV.isScalar());
3239 
3240  llvm::Value *src = srcRV.getScalarVal();
3241  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3242  "icr.cast");
3243 
3244  // Use an ordinary store, not a store-to-lvalue.
3245  CGF.Builder.CreateStore(src, temp);
3246 
3247  // If optimization is enabled, and the value was held in a
3248  // __strong variable, we need to tell the optimizer that this
3249  // value has to stay alive until we're doing the store back.
3250  // This is because the temporary is effectively unretained,
3251  // and so otherwise we can violate the high-level semantics.
3252  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3253  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3254  valueToUse = src;
3255  }
3256  }
3257 
3258  // Finish the control flow if we needed it.
3259  if (shouldCopy && !provablyNonNull) {
3260  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3261  CGF.EmitBlock(contBB);
3262 
3263  // Make a phi for the value to intrinsically use.
3264  if (valueToUse) {
3265  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3266  "icr.to-use");
3267  phiToUse->addIncoming(valueToUse, copyBB);
3268  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3269  originBB);
3270  valueToUse = phiToUse;
3271  }
3272 
3273  condEval.end(CGF);
3274  }
3275 
3276  args.addWriteback(srcLV, temp, valueToUse);
3277  args.add(RValue::get(finalArgument), CRE->getType());
3278 }
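
At the source level this lowering is triggered by ObjC ARC's indirect copy-restore, e.g. passing &strongLocal to an NSError * __autoreleasing * parameter. Setting the ARC retain/release bookkeeping aside, the protocol emitted above behaves like this standalone C++ model (hypothetical names, for intuition only):

  #include <cstdio>

  static void callee(int *out) { if (out) *out = 42; }

  // Mirrors the emitted protocol: conditionally copy in, pass the temporary
  // (or null), and copy the temporary back out after the call.
  static void callWithCopyRestore(int *src, bool shouldCopy) {
    int temp = 0;                    // "icr.temp"
    if (src && shouldCopy)
      temp = *src;                   // "icr.copy": conditional copy-in
    callee(src ? &temp : nullptr);   // "icr.argument": select against null
    if (src)
      *src = temp;                   // "icr.writeback" after the call
  }

  int main() {
    int x = 7;
    callWithCopyRestore(&x, /*shouldCopy=*/true);
    std::printf("%d\n", x);          // prints 42
  }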
3279 
3280 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3281  assert(!StackBase);
3282 
3283  // Save the stack.
3284  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3285  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3286 }
3287 
3288 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3289  if (StackBase) {
3290  // Restore the stack after the call.
3291  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3292  CGF.Builder.CreateCall(F, StackBase);
3293  }
3294 }
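
The stacksave/stackrestore pair brackets the dynamic inalloca allocation so the stack pointer is reset once the call completes. The same bracket written directly against IRBuilder (an illustrative sketch, not code from this file):

  // Assumes llvm/IR/IRBuilder.h and llvm/IR/Intrinsics.h.
  static llvm::Value *emitBracketedAlloca(llvm::IRBuilder<> &B,
                                          llvm::Module &M, llvm::Type *Ty) {
    llvm::Function *Save =
        llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::stacksave);
    llvm::Value *Base = B.CreateCall(Save, {}, "save");   // like StackBase
    llvm::Value *Mem = B.CreateAlloca(Ty, nullptr, "argmem");
    // ... emit the call that consumes Mem ...
    llvm::Function *Restore =
        llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::stackrestore);
    B.CreateCall(Restore, Base);                          // pop the memory
    return Mem;
  }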
3295 
3296 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3297  SourceLocation ArgLoc,
3298  AbstractCallee AC,
3299  unsigned ParmNum) {
3300  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3301  SanOpts.has(SanitizerKind::NullabilityArg)))
3302  return;
3303 
3304  // The param decl may be missing in a variadic function.
3305  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3306  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3307 
3308  // Prefer the nonnull attribute if it's present.
3309  const NonNullAttr *NNAttr = nullptr;
3310  if (SanOpts.has(SanitizerKind::NonnullAttribute))
3311  NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3312 
3313  bool CanCheckNullability = false;
3314  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3315  auto Nullability = PVD->getType()->getNullability(getContext());
3316  CanCheckNullability = Nullability &&
3317  *Nullability == NullabilityKind::NonNull &&
3318  PVD->getTypeSourceInfo();
3319  }
3320 
3321  if (!NNAttr && !CanCheckNullability)
3322  return;
3323 
3324  SourceLocation AttrLoc;
3325  SanitizerMask CheckKind;
3326  SanitizerHandler Handler;
3327  if (NNAttr) {
3328  AttrLoc = NNAttr->getLocation();
3329  CheckKind = SanitizerKind::NonnullAttribute;
3330  Handler = SanitizerHandler::NonnullArg;
3331  } else {
3332  AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3333  CheckKind = SanitizerKind::NullabilityArg;
3334  Handler = SanitizerHandler::NullabilityArg;
3335  }
3336 
3337  SanitizerScope SanScope(this);
3338  assert(RV.isScalar());
3339  llvm::Value *V = RV.getScalarVal();
3340  llvm::Value *Cond =
3341  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3342  llvm::Constant *StaticData[] = {
3343  EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3344  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3345  };
3346  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3347 }
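
For reference, the kind of call site this guards (assuming the build enables -fsanitize=nonnull-attribute or -fsanitize=nullability-arg):

  // With -fsanitize=nonnull-attribute, the check emitted above fires here.
  __attribute__((nonnull(1))) void use(int *p);

  void caller() {
    use(nullptr);  // argument 1 is null: reported via SanitizerHandler::NonnullArg
  }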
3348 
3349 void CodeGenFunction::EmitCallArgs(
3350  CallArgList &Args, ArrayRef<QualType> ArgTypes,
3351  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3352  AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3353  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3354 
3355  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3356  // because arguments are destroyed left to right in the callee. As a special
3357  // case, there are certain language constructs that require left-to-right
3358  // evaluation, and in those cases we consider the evaluation order requirement
3359  // to trump the "destruction order is reverse construction order" guarantee.
3360  bool LeftToRight =
3361  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3362  ? Order == EvaluationOrder::ForceLeftToRight
3363  : Order != EvaluationOrder::ForceRightToLeft;
3364 
3365  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3366  RValue EmittedArg) {
3367  if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3368  return;
3369  auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3370  if (PS == nullptr)
3371  return;
3372 
3373  const auto &Context = getContext();
3374  auto SizeTy = Context.getSizeType();
3375  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3376  assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3377  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3378  EmittedArg.getScalarVal());
3379  Args.add(RValue::get(V), SizeTy);
3380  // If we're emitting args in reverse, be sure to do so with
3381  // pass_object_size, as well.
3382  if (!LeftToRight)
3383  std::swap(Args.back(), *(&Args.back() - 1));
3384  };
3385 
3386  // Insert a stack save if we're going to need any inalloca args.
3387  bool HasInAllocaArgs = false;
3388  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3389  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3390  I != E && !HasInAllocaArgs; ++I)
3391  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3392  if (HasInAllocaArgs) {
3393  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3394  Args.allocateArgumentMemory(*this);
3395  }
3396  }
3397 
3398  // Evaluate each argument in the appropriate order.
3399  size_t CallArgsStart = Args.size();
3400  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3401  unsigned Idx = LeftToRight ? I : E - I - 1;
3402  CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3403  unsigned InitialArgSize = Args.size();
3404  // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3405  // the argument and parameter match or the objc method is parameterized.
3406  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3407  getContext().hasSameUnqualifiedType((*Arg)->getType(),
3408  ArgTypes[Idx]) ||
3409  (isa<ObjCMethodDecl>(AC.getDecl()) &&
3410  isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3411  "Argument and parameter types don't match");
3412  EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3413  // In particular, we depend on it being the last arg in Args, and the
3414  // objectsize bits depend on there only being one arg if !LeftToRight.
3415  assert(InitialArgSize + 1 == Args.size() &&
3416  "The code below depends on only adding one arg per EmitCallArg");
3417  (void)InitialArgSize;
3418  RValue RVArg = Args.back().RV;
3419  EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3420  ParamsToSkip + Idx);
3421  // @llvm.objectsize should never have side-effects and shouldn't need
3422  // destruction/cleanups, so we can safely "emit" it after its arg,
3423  // regardless of right-to-leftness.
3424  MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3425  }
3426 
3427  if (!LeftToRight) {
3428  // Un-reverse the arguments we just evaluated so they match up with the LLVM
3429  // IR function.
3430  std::reverse(Args.begin() + CallArgsStart, Args.end());
3431  }
3432 }
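
The practical effect of the LeftToRight flag (an illustrative example, assuming a Microsoft-ABI target, where callees destroy their arguments left to right):

  // h's arguments are emitted right-to-left here, so g() is evaluated before
  // f(); the Args vector is then un-reversed so it still lines up with the
  // IR parameter order.
  int f();
  int g();
  void h(int, int);
  void caller() { h(f(), g()); }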
3433 
3434 namespace {
3435 
3436 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3437  DestroyUnpassedArg(Address Addr, QualType Ty)
3438  : Addr(Addr), Ty(Ty) {}
3439 
3440  Address Addr;
3441  QualType Ty;
3442 
3443  void Emit(CodeGenFunction &CGF, Flags flags) override {
3444  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3445  assert(!Dtor->isTrivial());
3446  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3447  /*Delegating=*/false, Addr);
3448  }
3449 };
3450 
3451 struct DisableDebugLocationUpdates {
3452  CodeGenFunction &CGF;
3453  bool disabledDebugInfo;
3454  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3455  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3456  CGF.disableDebugInfo();
3457  }
3458  ~DisableDebugLocationUpdates() {
3459  if (disabledDebugInfo)
3460  CGF.enableDebugInfo();
3461  }
3462 };
3463 
3464 } // end anonymous namespace
3465 
3466 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3467  QualType type) {
3468  DisableDebugLocationUpdates Dis(*this, E);
3469  if (const ObjCIndirectCopyRestoreExpr *CRE
3470  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3471  assert(getLangOpts().ObjCAutoRefCount);
3472  return emitWritebackArg(*this, args, CRE);
3473  }
3474 
3475  assert(type->isReferenceType() == E->isGLValue() &&
3476  "reference binding to unmaterialized r-value!");
3477 
3478  if (E->isGLValue()) {
3479  assert(E->getObjectKind() == OK_Ordinary);
3480  return args.add(EmitReferenceBindingToExpr(E), type);
3481  }
3482 
3483  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3484 
3485  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3486  // However, we still have to push an EH-only cleanup in case we unwind before
3487  // we make it to the call.
3488  if (HasAggregateEvalKind &&
3489  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3490  // If we're using inalloca, use the argument memory. Otherwise, use a
3491  // temporary.
3492  AggValueSlot Slot;
3493  if (args.isUsingInAlloca())
3494  Slot = createPlaceholderSlot(*this, type);
3495  else
3496  Slot = CreateAggTemp(type, "agg.tmp");
3497 
3498  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3499  bool DestroyedInCallee =
3500  RD && RD->hasNonTrivialDestructor() &&
3501  CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
3502  if (DestroyedInCallee)
3503  Slot.setExternallyDestructed();
3504 
3505  EmitAggExpr(E, Slot);
3506  RValue RV = Slot.asRValue();
3507  args.add(RV, type);
3508 
3509  if (DestroyedInCallee) {
3510  // Create a no-op GEP between the placeholder and the cleanup so we can
3511  // RAUW it successfully. It also serves as a marker of the first
3512  // instruction where the cleanup is active.
3513  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3514  type);
3515  // This unreachable is a temporary marker which will be removed later.
3516  llvm::Instruction *IsActive = Builder.CreateUnreachable();
3517  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3518  }
3519  return;
3520  }
3521 
3522  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3523  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3524  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3525  assert(L.isSimple());
3526  if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
3527  args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
3528  } else {
3529  // We can't represent a misaligned lvalue in the CallArgList, so copy
3530  // to an aligned temporary now.
3531  Address tmp = CreateMemTemp(type);
3532  EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
3533  args.add(RValue::getAggregate(tmp), type);
3534  }
3535  return;
3536  }
3537 
3538  args.add(EmitAnyExprToTemp(E), type);
3539 }
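
A concrete case for the callee-destroyed branch above (again assuming a Microsoft-ABI target):

  // S has a non-trivial destructor, so the callee destroys the by-value
  // argument; the EH-only DestroyUnpassedArg cleanup covers an unwind that
  // happens after S{} is evaluated but before the call is actually made.
  struct S {
    ~S();
    int x;
  };
  void callee(S);
  void caller() { callee(S{}); }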
3540 
3541 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3542  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3543  // implicitly widens null pointer constants that are arguments to varargs
3544  // functions to pointer-sized ints.
3545  if (!getTarget().getTriple().isOSWindows())
3546  return Arg->getType();
3547 
3548  if (Arg->getType()->isIntegerType() &&
3549  getContext().getTypeSize(Arg->getType()) <
3550  getContext().getTargetInfo().getPointerWidth(0) &&
3551  Arg->isNullPointerConstant(getContext(),
3552  Expr::NPC_ValueDependentIsNotNull)) {
3553  return getContext().getIntPtrType();
3554  }
3555 
3556  return Arg->getType();
3557 }
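
The situation this compensates for (illustrative; assumes a Win64 target, where system headers define NULL as plain 0):

  #include <cstdio>

  void demo(const char *p) {
    // NULL is an int 0 here; getVarArgType widens such null-pointer-constant
    // integer arguments to intptr_t so %p consumes a pointer-sized slot.
    std::printf("%s %p\n", p, NULL);
  }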
3558 
3559 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3560 // optimizer it can aggressively ignore unwind edges.
3561 void
3562 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3563  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3564  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3565  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3566  CGM.getNoObjCARCExceptionsMetadata());
3567 }
3568 
3569 /// Emits a call to the given no-arguments nounwind runtime function.
3570 llvm::CallInst *
3571 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3572  const llvm::Twine &name) {
3573  return EmitNounwindRuntimeCall(callee, None, name);
3574 }
3575 
3576 /// Emits a call to the given nounwind runtime function.
3577 llvm::CallInst *
3578 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3579  ArrayRef<llvm::Value *> args,
3580  const llvm::Twine &name) {
3581  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3582  call->setDoesNotThrow();
3583  return call;
3584 }
3585 
3586 /// Emits a simple call (never an invoke) to the given no-arguments
3587 /// runtime function.
3588 llvm::CallInst *
3589 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3590  const llvm::Twine &name) {
3591  return EmitRuntimeCall(callee, None, name);
3592 }
3593 
3594 // Calls which may throw must have operand bundles indicating which funclet
3595 // they are nested within.
3596 static void
3597 getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
3598  SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
3599  // There is no need for a funclet operand bundle if we aren't inside a
3600  // funclet.
3601  if (!CurrentFuncletPad)
3602  return;
3603 
3604  // Skip intrinsics which cannot throw.
3605  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3606  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3607  return;
3608 
3609  BundleList.emplace_back("funclet", CurrentFuncletPad);
3610 }
3611 
3612 /// Emits a simple call (never an invoke) to the given runtime function.
3613 llvm::CallInst *
3614 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3615  ArrayRef<llvm::Value *> args,
3616  const llvm::Twine &name) {
3617  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3618  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3619 
3620  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name);
3621  call->setCallingConv(getRuntimeCC());
3622  return call;
3623 }
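
Typical use of these helpers elsewhere in CodeGen (a sketch; __my_runtime_helper is a made-up runtime function name):

  // Declare (or reuse) a runtime function and call it with no unwind edges;
  // EmitNounwindRuntimeCall marks the resulting call nounwind.
  static void emitHelperCall(CodeGenFunction &CGF, CodeGenModule &CGM,
                             llvm::Value *ptr) {
    llvm::FunctionType *fnTy = llvm::FunctionType::get(
        CGF.VoidTy, {CGF.Int8PtrTy}, /*isVarArg=*/false);
    llvm::Constant *fn = CGM.CreateRuntimeFunction(fnTy, "__my_runtime_helper");
    CGF.EmitNounwindRuntimeCall(fn, {ptr});
  }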
3624 
3625 /// Emits a call or invoke to the given noreturn runtime function.
3626 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3627  ArrayRef<llvm::Value*> args) {
3628  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3629  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3630 
3631  if (getInvokeDest()) {
3632  llvm::InvokeInst *invoke =
3633  Builder.CreateInvoke(callee,
3634  getUnreachableBlock(),
3635  getInvokeDest(),
3636  args,
3637  BundleList);
3638  invoke->setDoesNotReturn();
3639  invoke->setCallingConv(getRuntimeCC());
3640  } else {
3641  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3642  call->setDoesNotReturn();
3643  call->setCallingConv(getRuntimeCC());
3644  Builder.CreateUnreachable();
3645  }
3646 }
3647 
3648 /// Emits a call or invoke instruction to the given nullary runtime function.
3649 llvm::CallSite
3650 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3651  const Twine &name) {
3652  return EmitRuntimeCallOrInvoke(callee, None, name);
3653 }
3654 
3655 /// Emits a call or invoke instruction to the given runtime function.
3656 llvm::CallSite
3657 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3658  ArrayRef<llvm::Value*> args,
3659  const Twine &name) {
3660  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3661  callSite.setCallingConv(getRuntimeCC());
3662  return callSite;
3663 }
3664 
3665 /// Emits a call or invoke instruction to the given function, depending
3666 /// on the current state of the EH stack.
3667 llvm::CallSite
3668 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3669  ArrayRef<llvm::Value *> Args,
3670  const Twine &Name) {
3671  llvm::BasicBlock *InvokeDest = getInvokeDest();
3672  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3673  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3674 
3675  llvm::Instruction *Inst;
3676  if (!InvokeDest)
3677  Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3678  else {
3679  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3680  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3681  Name);
3682  EmitBlock(ContBB);
3683  }
3684 
3685  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3686  // optimizer it can aggressively ignore unwind edges.
3687  if (CGM.getLangOpts().ObjCAutoRefCount)
3688  AddObjCARCExceptionMetadata(Inst);
3689 
3690  return llvm::CallSite(Inst);
3691 }
3692 
3693 /// \brief Store a non-aggregate value to an address to initialize it. For
3694 /// initialization, a non-atomic store will be used.
3695 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
3696  LValue Dst) {
3697  if (Src.isScalar())
3698  CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
3699  else
3700  CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
3701 }
3702 
3703 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3704  llvm::Value *New) {
3705  DeferredReplacements.push_back(std::make_pair(Old, New));
3706 }
3707 
3708 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3709  const CGCallee &Callee,
3710  ReturnValueSlot ReturnValue,
3711  const CallArgList &CallArgs,
3712  llvm::Instruction **callOrInvoke) {
3713  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3714 
3715  assert(Callee.isOrdinary());
3716 
3717  // Handle struct-return functions by passing a pointer to the
3718  // location that we would like to return into.
3719  QualType RetTy = CallInfo.getReturnType();
3720  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3721 
3722  llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3723 
3724  // 1. Set up the arguments.
3725 
3726  // If we're using inalloca, insert the allocation after the stack save.
3727  // FIXME: Do this earlier rather than hacking it in here!
3728  Address ArgMemory = Address::invalid();
3729  const llvm::StructLayout *ArgMemoryLayout = nullptr;
3730  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3731  const llvm::DataLayout &DL = CGM.getDataLayout();
3732  ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3733  llvm::Instruction *IP = CallArgs.getStackBase();
3734  llvm::AllocaInst *AI;
3735  if (IP) {
3736  IP = IP->getNextNode();
3737  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3738  "argmem", IP);
3739  } else {
3740  AI = CreateTempAlloca(ArgStruct, "argmem");
3741  }
3742  auto Align = CallInfo.getArgStructAlignment();
3743  AI->setAlignment(Align.getQuantity());
3744  AI->setUsedWithInAlloca(true);
3745  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3746  ArgMemory = Address(AI, Align);
3747  }
3748 
3749  // Helper function to drill into the inalloca allocation.
3750  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3751  auto FieldOffset =
3752  CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3753  return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3754  };
3755 
3756  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3757  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3758 
3759  // If the call returns a temporary with struct return, create a temporary
3760  // alloca to hold the result, unless one is given to us.
3761  Address SRetPtr = Address::invalid();
3762  size_t UnusedReturnSize = 0;
3763  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3764  if (!ReturnValue.isNull()) {
3765  SRetPtr = ReturnValue.getValue();
3766  } else {
3767  SRetPtr = CreateMemTemp(RetTy);
3768  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3769  uint64_t size =
3770  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3771  if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3772  UnusedReturnSize = size;
3773  }
3774  }
3775  if (IRFunctionArgs.hasSRetArg()) {
3776  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3777  } else if (RetAI.isInAlloca()) {
3778  Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3779  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3780  }
3781  }
3782 
3783  Address swiftErrorTemp = Address::invalid();
3784  Address swiftErrorArg = Address::invalid();
3785 
3786  // Translate all of the arguments as necessary to match the IR lowering.
3787  assert(CallInfo.arg_size() == CallArgs.size() &&
3788  "Mismatch between function signature & arguments.");
3789  unsigned ArgNo = 0;
3790  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3791  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3792  I != E; ++I, ++info_it, ++ArgNo) {
3793  const ABIArgInfo &ArgInfo = info_it->info;
3794  RValue RV = I->RV;
3795 
3796  // Insert a padding argument to ensure proper alignment.
3797  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3798  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3799  llvm::UndefValue::get(ArgInfo.getPaddingType());
3800 
3801  unsigned FirstIRArg, NumIRArgs;
3802  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3803 
3804  switch (ArgInfo.getKind()) {
3805  case ABIArgInfo::InAlloca: {
3806  assert(NumIRArgs == 0);
3807  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3808  if (RV.isAggregate()) {
3809  // Replace the placeholder with the appropriate argument slot GEP.
3810  llvm::Instruction *Placeholder =
3811  cast<llvm::Instruction>(RV.getAggregatePointer());
3812  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3813  Builder.SetInsertPoint(Placeholder);
3814  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3815  Builder.restoreIP(IP);
3816  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3817  } else {
3818  // Store the RValue into the argument struct.
3819  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3820  unsigned AS = Addr.getType()->getPointerAddressSpace();
3821  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3822  // There are some cases where a trivial bitcast is not avoidable. The
3823  // definition of a type later in a translation unit may change its type
3824  // from {}* to (%struct.foo*)*.
3825  if (Addr.getType() != MemType)
3826  Addr = Builder.CreateBitCast(Addr, MemType);
3827  LValue argLV = MakeAddrLValue(Addr, I->Ty);
3828  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3829  }
3830  break;
3831  }
3832 
3833  case ABIArgInfo::Indirect: {
3834  assert(NumIRArgs == 1);
3835  if (RV.isScalar() || RV.isComplex()) {
3836  // Make a temporary alloca to pass the argument.
3837  Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
3838  "indirect-arg-temp", false);
3839  IRCallArgs[FirstIRArg] = Addr.getPointer();
3840 
3841  LValue argLV = MakeAddrLValue(Addr, I->Ty);
3842  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3843  } else {
3844  // We want to avoid creating an unnecessary temporary+copy here;
3845  // however, we need one in three cases:
3846  // 1. If the argument is not byval, and we are required to copy the
3847  // source. (This case doesn't occur on any common architecture.)
3848  // 2. If the argument is byval, RV is not sufficiently aligned, and
3849  // we cannot force it to be sufficiently aligned.
3850  // 3. If the argument is byval, but RV is located in an address space
3851  // different than that of the argument (0).
3852  Address Addr = RV.getAggregateAddress();
3853  CharUnits Align = ArgInfo.getIndirectAlign();
3854  const llvm::DataLayout *TD = &CGM.getDataLayout();
3855  const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
3856  const unsigned ArgAddrSpace =
3857  (FirstIRArg < IRFuncTy->getNumParams()
3858  ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
3859  : 0);
3860  if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
3861  (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
3862  llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
3863  Align.getQuantity(), *TD)
3864  < Align.getQuantity()) ||
3865  (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
3866  // Create an aligned temporary, and copy to it.
3867  Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
3868  "byval-temp", false);
3869  IRCallArgs[FirstIRArg] = AI.getPointer();
3870  EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
3871  } else {
3872  // Skip the extra memcpy call.
3873  IRCallArgs[FirstIRArg] = Addr.getPointer();
3874  }
3875  }
3876  break;
3877  }
3878 
3879  case ABIArgInfo::Ignore:
3880  assert(NumIRArgs == 0);
3881  break;
3882 
3883  case ABIArgInfo::Extend:
3884  case ABIArgInfo::Direct: {
3885  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3886  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3887  ArgInfo.getDirectOffset() == 0) {
3888  assert(NumIRArgs == 1);
3889  llvm::Value *V;
3890  if (RV.isScalar())
3891  V = RV.getScalarVal();
3892  else
3893  V = Builder.CreateLoad(RV.getAggregateAddress());
3894 
3895  // Implement swifterror by copying into a new swifterror argument.
3896  // We'll write back in the normal path out of the call.
3897  if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3898  == ParameterABI::SwiftErrorResult) {
3899  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
3900 
3901  QualType pointeeTy = I->Ty->getPointeeType();
3902  swiftErrorArg =
3903  Address(V, getContext().getTypeAlignInChars(pointeeTy));
3904 
3905  swiftErrorTemp =
3906  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3907  V = swiftErrorTemp.getPointer();
3908  cast<llvm::AllocaInst>(V)->setSwiftError(true);
3909 
3910  llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
3911  Builder.CreateStore(errorValue, swiftErrorTemp);
3912  }
3913 
3914  // We might have to widen integers, but we should never truncate.
3915  if (ArgInfo.getCoerceToType() != V->getType() &&
3916  V->getType()->isIntegerTy())
3917  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3918 
3919  // If the argument doesn't match, perform a bitcast to coerce it. This
3920  // can happen due to trivial type mismatches.
3921  if (FirstIRArg < IRFuncTy->getNumParams() &&
3922  V->getType() != IRFuncTy->getParamType(FirstIRArg))
3923  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
3924 
3925  IRCallArgs[FirstIRArg] = V;
3926  break;
3927  }
3928 
3929  // FIXME: Avoid the conversion through memory if possible.
3930  Address Src = Address::invalid();
3931  if (RV.isScalar() || RV.isComplex()) {
3932  Src = CreateMemTemp(I->Ty, "coerce");
3933  LValue SrcLV = MakeAddrLValue(Src, I->Ty);
3934  EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
3935  } else {
3936  Src = RV.getAggregateAddress();
3937  }
3938 
3939  // If the value is offset in memory, apply the offset now.
3940  Src = emitAddressAtOffset(*this, Src, ArgInfo);
3941 
3942  // Fast-isel and the optimizer generally like scalar values better than
3943  // FCAs, so we flatten them if this is safe to do for this argument.
3944  llvm::StructType *STy =
3945  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
3946  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
3947  llvm::Type *SrcTy = Src.getType()->getElementType();
3948  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
3949  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
3950 
3951  // If the source type is smaller than the destination type of the
3952  // coerce-to logic, copy the source value into a temp alloca the size
3953  // of the destination type to allow loading all of it. The bits past
3954  // the source value are left undef.
3955  if (SrcSize < DstSize) {
3956  Address TempAlloca
3957  = CreateTempAlloca(STy, Src.getAlignment(),
3958  Src.getName() + ".coerce");
3959  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
3960  Src = TempAlloca;
3961  } else {
3962  Src = Builder.CreateBitCast(Src,
3963  STy->getPointerTo(Src.getAddressSpace()));
3964  }
3965 
3966  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
3967  assert(NumIRArgs == STy->getNumElements());
3968  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3969  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
3970  Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
3971  llvm::Value *LI = Builder.CreateLoad(EltPtr);
3972  IRCallArgs[FirstIRArg + i] = LI;
3973  }
3974  } else {
3975  // In the simple case, just pass the coerced loaded value.
3976  assert(NumIRArgs == 1);
3977  IRCallArgs[FirstIRArg] =
3978  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
3979  }
3980 
3981  break;
3982  }
3983 
3984  case ABIArgInfo::CoerceAndExpand: {
3985  auto coercionType = ArgInfo.getCoerceAndExpandType();
3986  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
3987 
3988  llvm::Value *tempSize = nullptr;
3989  Address addr = Address::invalid();
3990  if (RV.isAggregate()) {
3991  addr = RV.getAggregateAddress();
3992  } else {
3993  assert(RV.isScalar()); // complex should always just be direct
3994 
3995  llvm::Type *scalarType = RV.getScalarVal()->getType();
3996  auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
3997  auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
3998 
3999  tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);
4000 
4001  // Materialize to a temporary.
4002  addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4003  CharUnits::fromQuantity(std::max(layout->getAlignment(),
4004  scalarAlign)));
4005  EmitLifetimeStart(scalarSize, addr.getPointer());
4006 
4007  Builder.CreateStore(RV.getScalarVal(), addr);
4008  }
4009 
4010  addr = Builder.CreateElementBitCast(addr, coercionType);
4011 
4012  unsigned IRArgPos = FirstIRArg;
4013  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4014  llvm::Type *eltType = coercionType->getElementType(i);
4015  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4016  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4017  llvm::Value *elt = Builder.CreateLoad(eltAddr);
4018  IRCallArgs[IRArgPos++] = elt;
4019  }
4020  assert(IRArgPos == FirstIRArg + NumIRArgs);
4021 
4022  if (tempSize) {
4023  EmitLifetimeEnd(tempSize, addr.getPointer());
4024  }
4025 
4026  break;
4027  }
4028 
4029  case ABIArgInfo::Expand:
4030  unsigned IRArgPos = FirstIRArg;
4031  ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
4032  assert(IRArgPos == FirstIRArg + NumIRArgs);
4033  break;
4034  }
4035  }
4036 
4037  llvm::Value *CalleePtr = Callee.getFunctionPointer();
4038 
4039  // If we're using inalloca, set up that argument.
4040  if (ArgMemory.isValid()) {
4041  llvm::Value *Arg = ArgMemory.getPointer();
4042  if (CallInfo.isVariadic()) {
4043  // When passing non-POD arguments by value to variadic functions, we will
4044  // end up with a variadic prototype and an inalloca call site. In such
4045  // cases, we can't do any parameter mismatch checks. Give up and bitcast
4046  // the callee.
4047  unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4048  auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4049  CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4050  } else {
4051  llvm::Type *LastParamTy =
4052  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4053  if (Arg->getType() != LastParamTy) {
4054 #ifndef NDEBUG
4055  // Assert that these structs have equivalent element types.
4056  llvm::StructType *FullTy = CallInfo.getArgStruct();
4057  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4058  cast<llvm::PointerType>(LastParamTy)->getElementType());
4059  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4060  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4061  DE = DeclaredTy->element_end(),
4062  FI = FullTy->element_begin();
4063  DI != DE; ++DI, ++FI)
4064  assert(*DI == *FI);
4065 #endif
4066  Arg = Builder.CreateBitCast(Arg, LastParamTy);
4067  }
4068  }
4069  assert(IRFunctionArgs.hasInallocaArg());
4070  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4071  }
4072 
4073  // 2. Prepare the function pointer.
4074 
4075  // If the callee is a bitcast of a non-variadic function to have a
4076  // variadic function pointer type, check to see if we can remove the
4077  // bitcast. This comes up with unprototyped functions.
4078  //
4079  // This makes the IR nicer, but more importantly it ensures that we
4080  // can inline the function at -O0 if it is marked always_inline.
4081  auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4082  llvm::FunctionType *CalleeFT =
4083  cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4084  if (!CalleeFT->isVarArg())
4085  return Ptr;
4086 
4087  llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4088  if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4089  return Ptr;
4090 
4091  llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4092  if (!OrigFn)
4093  return Ptr;
4094 
4095  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4096 
4097  // If the original type is variadic, or if any of the component types
4098  // disagree, we cannot remove the cast.
4099  if (OrigFT->isVarArg() ||
4100  OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4101  OrigFT->getReturnType() != CalleeFT->getReturnType())
4102  return Ptr;
4103 
4104  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4105  if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4106  return Ptr;
4107 
4108  return OrigFn;
4109  };
4110  CalleePtr = simplifyVariadicCallee(CalleePtr);
4111 
4112  // 3. Perform the actual call.
4113 
4114  // Deactivate any cleanups that we're supposed to do immediately before
4115  // the call.
4116  if (!CallArgs.getCleanupsToDeactivate().empty())
4117  deactivateArgCleanupsBeforeCall(*this, CallArgs);
4118 
4119  // Assert that the arguments we computed match up. The IR verifier
4120  // will catch this, but this is a common enough source of problems
4121  // during IRGen changes that it's way better for debugging to catch
4122  // it ourselves here.
4123 #ifndef NDEBUG
4124  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4125  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4126  // Inalloca argument can have different type.
4127  if (IRFunctionArgs.hasInallocaArg() &&
4128  i == IRFunctionArgs.getInallocaArgNo())
4129  continue;
4130  if (i < IRFuncTy->getNumParams())
4131  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4132  }
4133 #endif
4134 
4135  // Compute the calling convention and attributes.
4136  unsigned CallingConv;
4137  llvm::AttributeList Attrs;
4138  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4139  Callee.getAbstractInfo(), Attrs, CallingConv,
4140  /*AttrOnCallSite=*/true);
4141 
4142  // Apply some call-site-specific attributes.
4143  // TODO: work this into building the attribute set.
4144 
4145  // Apply always_inline to all calls within flatten functions.
4146  // FIXME: should this really take priority over __try, below?
4147  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4148  !(Callee.getAbstractInfo().getCalleeDecl() &&
4149  Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
4150  Attrs =
4151  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4152  llvm::Attribute::AlwaysInline);
4153  }
4154 
4155  // Disable inlining inside SEH __try blocks.
4156  if (isSEHTryScope()) {
4157  Attrs =
4158  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4159  llvm::Attribute::NoInline);
4160  }
4161 
4162  // Decide whether to use a call or an invoke.
4163  bool CannotThrow;
4164  if (currentFunctionUsesSEHTry()) {
4165  // SEH cares about asynchronous exceptions, so everything can "throw."
4166  CannotThrow = false;
4167  } else if (isCleanupPadScope() &&
4168  EHPersonality::get(*this).isMSVCXXPersonality()) {
4169  // The MSVC++ personality will implicitly terminate the program if an
4170  // exception is thrown during a cleanup outside of a try/catch.
4171  // We don't need to model anything in IR to get this behavior.
4172  CannotThrow = true;
4173  } else {
4174  // Otherwise, nounwind call sites will never throw.
4175  CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4176  llvm::Attribute::NoUnwind);
4177  }
4178  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4179 
4180  SmallVector<llvm::OperandBundleDef, 1> BundleList;
4181  getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList);
4182 
4183  // Emit the actual call/invoke instruction.
4184  llvm::CallSite CS;
4185  if (!InvokeDest) {
4186  CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4187  } else {
4188  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4189  CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4190  BundleList);
4191  EmitBlock(Cont);
4192  }
4193  llvm::Instruction *CI = CS.getInstruction();
4194  if (callOrInvoke)
4195  *callOrInvoke = CI;
4196 
4197  // Apply the attributes and calling convention.
4198  CS.setAttributes(Attrs);
4199  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4200 
4201  // Apply various metadata.
4202 
4203  if (!CI->getType()->isVoidTy())
4204  CI->setName("call");
4205 
4206  // Insert instrumentation or attach profile metadata at indirect call sites.
4207  // For more details, see the comment before the definition of
4208  // IPVK_IndirectCallTarget in InstrProfData.inc.
4209  if (!CS.getCalledFunction())
4210  PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4211  CI, CalleePtr);
4212 
4213  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4214  // optimizer it can aggressively ignore unwind edges.
4215  if (CGM.getLangOpts().ObjCAutoRefCount)
4216  AddObjCARCExceptionMetadata(CI);
4217 
4218  // Suppress tail calls if requested.
4219  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4220  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4221  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4222  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4223  }
4224 
4225  // 4. Finish the call.
4226 
4227  // If the call doesn't return, finish the basic block and clear the
4228  // insertion point; this allows the rest of IRGen to discard
4229  // unreachable code.
4230  if (CS.doesNotReturn()) {
4231  if (UnusedReturnSize)
4232  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4233  SRetPtr.getPointer());
4234 
4235  Builder.CreateUnreachable();
4236  Builder.ClearInsertionPoint();
4237 
4238  // FIXME: For now, emit a dummy basic block because expr emitters
4239  // generally are not ready to handle emitting expressions at unreachable
4240  // points.
4241  EnsureInsertPoint();
4242 
4243  // Return a reasonable RValue.
4244  return GetUndefRValue(RetTy);
4245  }
4246 
4247  // Perform the swifterror writeback.
4248  if (swiftErrorTemp.isValid()) {
4249  llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4250  Builder.CreateStore(errorResult, swiftErrorArg);
4251  }
4252 
4253  // Emit any call-associated writebacks immediately. Arguably this
4254  // should happen after any return-value munging.
4255  if (CallArgs.hasWritebacks())
4256  emitWritebacks(*this, CallArgs);
4257 
4258  // The stack cleanup for inalloca arguments has to run out of the normal
4259  // lexical order, so deactivate it and run it manually here.
4260  CallArgs.freeArgumentMemory(*this);
4261 
4262  // Extract the return value.
4263  RValue Ret = [&] {
4264  switch (RetAI.getKind()) {
4265  case ABIArgInfo::CoerceAndExpand: {
4266  auto coercionType = RetAI.getCoerceAndExpandType();
4267  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4268 
4269  Address addr = SRetPtr;
4270  addr = Builder.CreateElementBitCast(addr, coercionType);
4271 
4272  assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4273  bool requiresExtract = isa<llvm::StructType>(CI->getType());
4274 
4275  unsigned unpaddedIndex = 0;
4276  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4277  llvm::Type *eltType = coercionType->getElementType(i);
4278  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4279  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4280  llvm::Value *elt = CI;
4281  if (requiresExtract)
4282  elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4283  else
4284  assert(unpaddedIndex == 0);
4285  Builder.CreateStore(elt, eltAddr);
4286  }
4287  // FALLTHROUGH
4288  LLVM_FALLTHROUGH;
4289  }
4290 
4291  case ABIArgInfo::InAlloca:
4292  case ABIArgInfo::Indirect: {
4293  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4294  if (UnusedReturnSize)
4295  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4296  SRetPtr.getPointer());
4297  return ret;
4298  }
4299 
4300  case ABIArgInfo::Ignore:
4301  // If we are ignoring an argument that had a result, make sure to
4302  // construct the appropriate return value for our caller.
4303  return GetUndefRValue(RetTy);
4304 
4305  case ABIArgInfo::Extend:
4306  case ABIArgInfo::Direct: {
4307  llvm::Type *RetIRTy = ConvertType(RetTy);
4308  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4309  switch (getEvaluationKind(RetTy)) {
4310  case TEK_Complex: {
4311  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4312  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4313  return RValue::getComplex(std::make_pair(Real, Imag));
4314  }
4315  case TEK_Aggregate: {
4316  Address DestPtr = ReturnValue.getValue();
4317  bool DestIsVolatile = ReturnValue.isVolatile();
4318 
4319  if (!DestPtr.isValid()) {
4320  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4321  DestIsVolatile = false;
4322  }
4323  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4324  return RValue::getAggregate(DestPtr);
4325  }
4326  case TEK_Scalar: {
4327  // If the argument doesn't match, perform a bitcast to coerce it. This
4328  // can happen due to trivial type mismatches.
4329  llvm::Value *V = CI;
4330  if (V->getType() != RetIRTy)
4331  V = Builder.CreateBitCast(V, RetIRTy);
4332  return RValue::get(V);
4333  }
4334  }
4335  llvm_unreachable("bad evaluation kind");
4336  }
4337 
4338  Address DestPtr = ReturnValue.getValue();
4339  bool DestIsVolatile = ReturnValue.isVolatile();
4340 
4341  if (!DestPtr.isValid()) {
4342  DestPtr = CreateMemTemp(RetTy, "coerce");
4343  DestIsVolatile = false;
4344  }
4345 
4346  // If the value is offset in memory, apply the offset now.
4347  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4348  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4349 
4350  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4351  }
4352 
4353  case ABIArgInfo::Expand:
4354  llvm_unreachable("Invalid ABI kind for return argument");
4355  }
4356 
4357  llvm_unreachable("Unhandled ABIArgInfo::Kind");
4358  } ();
4359 
4360  // Emit the assume_aligned check on the return value.
4361  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4362  if (Ret.isScalar() && TargetDecl) {
4363  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4364  llvm::Value *OffsetValue = nullptr;
4365  if (const auto *Offset = AA->getOffset())
4366  OffsetValue = EmitScalarExpr(Offset);
4367 
4368  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4369  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4370  EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4371  OffsetValue);
4372  } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4373  llvm::Value *ParamVal =
4374  CallArgs[AA->getParamIndex() - 1].RV.getScalarVal();
4375  EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4376  }
4377  }
4378 
4379  return Ret;
4380 }
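
How a caller typically reaches EmitCall (a sketch; assumes the argument list and function type are already in hand):

  // Arrange the ABI-level signature for this call, then let EmitCall do the
  // lowering above: inalloca setup, argument coercion, call vs. invoke
  // selection, and writebacks.
  static RValue emitSimpleCall(CodeGenFunction &CGF, const CGCallee &callee,
                               const FunctionType *fnType, CallArgList &args) {
    const CGFunctionInfo &fnInfo = CGF.CGM.getTypes().arrangeFreeFunctionCall(
        args, fnType, /*chainCall=*/false);
    return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
  }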
4381 
4382 /* VarArg handling */
4383 
4384 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4385  VAListAddr = VE->isMicrosoftABI()
4386  ? EmitMSVAListRef(VE->getSubExpr())
4387  : EmitVAListRef(VE->getSubExpr());
4388  QualType Ty = VE->getType();
4389  if (VE->isMicrosoftABI())
4390  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4391  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4392 }
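
The source construct that lands here: each va_arg expression lowers through EmitVAArg to the target's ABIInfo (EmitMSVAArg on Microsoft-ABI targets).

  #include <cstdarg>

  // Every va_arg below reaches CodeGenFunction::EmitVAArg during IRGen.
  int sum(int n, ...) {
    va_list ap;
    va_start(ap, n);
    int s = 0;
    for (int i = 0; i < n; ++i)
      s += va_arg(ap, int);
    va_end(ap);
    return s;
  }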
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:640
bool isAggregate() const
Definition: CGValue.h:54
const llvm::DataLayout & getDataLayout() const
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
Definition: ExprObjC.h:1464
CGCXXABI & getCXXABI() const
Definition: CodeGenTypes.h:177
Ignore - Ignore the argument (treat as void).
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Definition: CGCall.h:281
ParameterABI getABI() const
Return the ABI treatment of this parameter.
Definition: Type.h:3204
TargetOptions & getTargetOpts() const
Retrieve the target options.
Definition: TargetInfo.h:122
FunctionDecl - An instance of this class is created to represent a function declaration or definition...
Definition: Decl.h:1631
Address getAddress() const
Definition: CGValue.h:559
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
Definition: CGCall.cpp:614
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
Definition: CGCall.cpp:2929
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2222
Complete object ctor.
Definition: ABI.h:26
CanQualType VoidPtrTy
Definition: ASTContext.h:981
A (possibly-)qualified type.
Definition: Type.h:614
bool isBlockPointerType() const
Definition: Type.h:5772
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses &#39;sret&#39; when used as a return type.
Definition: CGCall.cpp:1481
llvm::Type * ConvertTypeForMem(QualType T)
const CodeGenOptions & getCodeGenOpts() const
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition: Type.cpp:1821
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
Definition: CGCall.cpp:79
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
Definition: CGCall.cpp:3057
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign)
Create a temporary allocation for the purposes of coercion.
Definition: CGCall.cpp:1093
CXXDtorType getDtorType() const
Definition: GlobalDecl.h:69
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
Definition: CGCall.cpp:2669
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
Definition: CGCall.cpp:549
const ABIInfo & getABIInfo() const
Definition: CodeGenTypes.h:175
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:2964
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty, const FunctionDecl *FD)
Arrange the argument and result information for a value of the given freestanding function type...
Definition: CGCall.cpp:187
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:435
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:782
const Decl * getCalleeDecl() const
Definition: CGCall.h:62
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type...
Definition: Type.h:3547
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:751
Extend - Valid only for integer argument types.
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition: CGCall.cpp:4384
static bool isProvablyNull(llvm::Value *addr)
Definition: CGCall.cpp:3052
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:81
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
Definition: CGCall.cpp:242
bool isVirtual() const
Definition: DeclCXX.h:1962
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
const Expr * getSubExpr() const
Definition: Expr.h:3797
bool isVolatile() const
Definition: CGValue.h:304
The base class of the type hierarchy.
Definition: Type.h:1300
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:1777
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
Definition: CGCall.cpp:2143
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
Definition: Type.h:5607
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
static int getExpansionSize(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:946
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:645
const ParmVarDecl * getParamDecl(unsigned I) const
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i...
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, ArrayRef< llvm::Value *> Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3668
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
Definition: CGObjC.cpp:1981
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:360
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2344
virtual AddedStructorArgs buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters...
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
Definition: CGCall.cpp:2123
bool hasWritebacks() const
Definition: CGCall.h:232
Default closure variant of a ctor.
Definition: ABI.h:30
ExtParameterInfo withIsNoEscape(bool NoEscape) const
Definition: Type.h:3241
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
VarDecl - An instance of this class is created to represent a variable declaration or definition...
Definition: Decl.h:771
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:104
llvm::Instruction * getStackBase() const
Definition: CGCall.h:254
unsigned getNumParams() const
Definition: Type.h:3393
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
llvm::Value * getFunctionPointer() const
Definition: CGCall.h:157
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
Definition: CGCall.cpp:1197
const T * getAs() const
Member-template getAs<specific type>&#39;.
Definition: Type.h:6099
void setCoerceToType(llvm::Type *T)
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:3063
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:113
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, LValue Dst)
Store a non-aggregate value to an address to initialize it.
Definition: CGCall.cpp:3695
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition: CGCall.cpp:3296
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:234
llvm::Value * getPointer() const
Definition: Address.h:38
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:288
Address getValue() const
Definition: CGCall.h:301
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
ParmVarDecl - Represents a parameter to a function.
Definition: Decl.h:1447
std::vector< std::string > Reciprocals
Definition: TargetOptions.h:57
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:57
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert a Clang calling convention to the corresponding LLVM calling convention.
Definition: CGCall.cpp:46
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Definition: TargetInfo.cpp:407
RecordDecl - Represents a struct/union/class.
Definition: Decl.h:3384
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3288
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:311
const TargetInfo & getTarget() const
An object to manage conditionally-evaluated expressions.
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2318
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:851
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:3123
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2749
Address getAddress() const
Definition: CGValue.h:330
unsigned getRegParm() const
Definition: Type.h:3038
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:128
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:3548
llvm::Constant * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
field_range fields() const
Definition: Decl.h:3513
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2214
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struc...
Definition: Decl.h:2382
CharUnits getAlignment() const
Definition: CGValue.h:319
RequiredArgs getRequiredArgs() const
bool isUsingInAlloca() const
Returns whether we're using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:259
unsigned getFunctionScopeIndex() const
Returns the index of this parameter in its prototype or method scope.
Definition: Decl.h:1500
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:104
llvm::CallInst * EmitRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isOrdinary() const
Definition: CGCall.h:150
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:269
CharUnits getArgStructAlignment() const
bool isReferenceType() const
Definition: Type.h:5775
Interesting information about a specific parameter that can't simply be reflected in the parameter's type...
Definition: Type.h:3191
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:81
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2204
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
Definition: CGCall.cpp:3152
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:241
bool getProducesResult() const
Definition: Type.h:3035
llvm::FunctionType * getFunctionType() const
Definition: CGCall.h:161
bool isGLValue() const
Definition: Expr.h:252
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:288
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment...
static bool hasScalarEvaluationKind(QualType T)
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2542
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:157
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
uint32_t Offset
Definition: CacheTokens.cpp:43
llvm::StructType * getCoerceAndExpandType() const
bool hasConstructorVariants() const
Does this ABI have different entrypoints for complete-object and base-subobject constructors?
Definition: TargetCXXABI.h:222
Wrapper for source info for functions.
Definition: TypeLoc.h:1357
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:67
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:108
unsigned getInAllocaFieldIndex() const
bool isComplex() const
Definition: CGValue.h:53
const_arg_iterator arg_begin() const
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:64
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:378
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:1780
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:259
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:134
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
Definition: CGCall.cpp:455
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
Definition: CGCall.cpp:689
Values of this type can never be null.
bool isNothrow(const ASTContext &Ctx, bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.h:3492
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:85
bool isSimple() const
Definition: CGValue.h:255
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:263
bool isInstance() const
Definition: DeclCXX.h:1945
An ordinary object is located at an address in memory.
Definition: Specifiers.h:123
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1437
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:91
FunctionType::ExtInfo getExtInfo() const
QualType getReturnType() const
Definition: DeclObjC.h:330
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:724
bool getNoReturn() const
Definition: Type.h:3034
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:70
Address getAggregateAddress() const
getAggregateAddress() - Return the address of the aggregate.
Definition: CGValue.h:71
bool getNoCallerSavedRegs() const
Definition: Type.h:3036
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
Definition: CGCall.cpp:3466
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e.g., it is an unsigned integer type or a vector.
Definition: Type.cpp:1835
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:497
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD)
Derives the 'this' type for codegen purposes, i.e.
Definition: CGCall.cpp:73
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:3083
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
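As a sketch of how the scalar load/store pair in this index (EmitStoreOfScalar above, EmitLoadOfScalar here) is typically combined, assuming CGF, source and destination Addresses Src and Dst, a QualType Ty, and a SourceLocation Loc are in scope:

  // Copy one scalar of AST type Ty between two addresses, letting the helpers
  // handle any memory<->value representation conversion.
  llvm::Value *V = CGF.EmitLoadOfScalar(Src, /*Volatile=*/false, Ty, Loc);
  CGF.EmitStoreOfScalar(V, Dst, /*Volatile=*/false, Ty);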
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
Definition: CGCall.cpp:486
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
Definition: Type.h:3136
bool hasAttr() const
Definition: DeclBase.h:521
CanQualType getReturnType() const
Const iterator for iterating over Stmt * arrays that contain only Expr *.
Definition: Stmt.h:329
bool isValid() const
Definition: Address.h:36
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1569
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3170
llvm::CallInst * EmitNounwindRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:3802
const TargetCodeGenInfo & getTargetCodeGenInfo()
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:39
writeback_const_range writebacks() const
Definition: CGCall.h:237
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:226
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
Definition: CGCall.cpp:3013
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:3779
Address Temporary
The temporary alloca.
Definition: CGCall.h:193
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'this'...
Definition: CGCXXABI.h:106
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:196
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:2993
Expr - This represents one expression.
Definition: Expr.h:106
bool isVariadic() const
Whether this function is variadic.
Definition: Decl.cpp:2540
static Address invalid()
Definition: Address.h:35
llvm::Type * getUnpaddedCoerceAndExpandType() const
const FunctionProtoType * T
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:2988
bool useObjCFPRetForRealType(RealType T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
Definition: TargetInfo.h:552
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:88
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:66
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
Definition: CGCall.cpp:682
bool getHasRegParm() const
Definition: Type.h:3037
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:6162
bool isObjCRetainableType() const
Definition: Type.cpp:3789
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2566
llvm::Constant * objc_retain
id objc_retain(id);
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:44
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
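Taken together with CharUnits::Zero and getQuantity above, a small round-trip sketch of the CharUnits API (the values are made up):

  #include "clang/AST/CharUnits.h"
  using clang::CharUnits;

  CharUnits PtrSize = CharUnits::fromQuantity(8); // e.g. a 64-bit pointer
  CharUnits Empty   = CharUnits::Zero();
  int64_t Raw = PtrSize.getQuantity();            // back to the raw integer, 8
  bool NonEmpty = PtrSize > Empty;                // CharUnits is ordered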
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
void add(RValue rvalue, QualType type, bool needscopy=false)
Definition: CGCall.h:207
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:344
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2530
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
A class for recording the number of arguments that a function signature requires. ...
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type...
Definition: CGCall.cpp:1485
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:627
QualType getType() const
Definition: Expr.h:128
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1327
bool hasNonTrivialDestructor() const
Determine whether this class has a non-trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1367
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2687
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:197
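A worked example for alignmentOfArrayElement, continuing the CharUnits sketch above: if the first element of an array is 16-byte aligned but each element occupies 12 bytes, the second element lands at offset 12, so elements in general are only guaranteed 4-byte alignment (the largest power of two dividing both 16 and the 12-byte stride).

  CharUnits FirstElemAlign = CharUnits::fromQuantity(16);
  CharUnits ElemSize       = CharUnits::fromQuantity(12);
  CharUnits AnyElemAlign   = FirstElemAlign.alignmentOfArrayElement(ElemSize);
  // AnyElemAlign.getQuantity() == 4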
void Profile(llvm::FoldingSetNodeID &ID)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:918
UnaryOperator - This represents the unary-expression's (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1717
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:1922
ASTContext & getContext() const
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:408
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, IsZeroed_t isZeroed=IsNotZeroed)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:499
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1143
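The brief above names the coercion but not the mechanics. The following free-standing sketch is my own reconstruction, not the CGCall.cpp implementation; it shows the int/pointer cases such a coercion has to distinguish:

  #include "llvm/IR/IRBuilder.h"

  // Sketch: coerce Val to Ty when both are integers or pointers. The real
  // helper also handles width mismatches via an intermediate integer type.
  static llvm::Value *coerceIntOrPtr(llvm::IRBuilder<> &B, llvm::Value *Val,
                                     llvm::Type *Ty) {
    if (Val->getType() == Ty)
      return Val;
    if (Val->getType()->isPointerTy() && Ty->isIntegerTy())
      return B.CreatePtrToInt(Val, Ty); // pointer -> integer
    if (Val->getType()->isIntegerTy() && Ty->isPointerTy())
      return B.CreateIntToPtr(Val, Ty); // integer -> pointer
    return B.CreateBitCast(Val, Ty);    // same-width int<->int or ptr<->ptr
  }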
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:233
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:29
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1249
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:412
CanQualType getCanonicalTypeUnqualified() const
std::string CPU
If given, the name of the target CPU to generate code for.
Definition: TargetOptions.h:36
The l-value was considered opaque, so the alignment was determined from a type.
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store of Src to the address Dst, behaving as if the value were coerced through memory.
Definition: CGCall.cpp:1274
Enumerates target-specific builtins in their own namespaces within namespace clang.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:142
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:144
Kind
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
Definition: CGCall.cpp:1490
CanProxy< U > castAs() const
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:3141
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a null pointer constant...
Definition: Expr.cpp:3272
Encodes a location in the source.
QualType getReturnType() const
Definition: Type.h:3106
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2088
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:278
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition: CGCall.cpp:3657
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1230
CallingConv getCC() const
Definition: Type.h:3044
const Decl * getDecl() const
Definition: GlobalDecl.h:62
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C &#39;SEL&#39; type.
Definition: ASTContext.h:1771
An aggregate value slot.
Definition: CGValue.h:438
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
Definition: CGCall.cpp:442
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:1918
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2272
const_arg_iterator arg_end() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
ObjCEntrypoints & getObjCEntrypoints() const
CoerceAndExpand - Only valid for aggregate argument types.
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:3280
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings startin...
Definition: TargetOptions.h:55
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying ty...
Definition: Type.cpp:1781
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:706
CanQualType VoidTy
Definition: ASTContext.h:965
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
bool isAnyPointerType() const
Definition: Type.h:5769
An aligned address.
Definition: Address.h:25
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Definition: TargetInfo.h:558
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:178
All available information about a concrete callee.
Definition: CGCall.h:66
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:352
Complete object dtor.
Definition: ABI.h:36
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
Definition: CGCall.cpp:1507
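One plausible way the fpret/fp2ret predicates in this index fit together when Objective-C codegen picks a message-send variant. This is a hedged sketch, assuming the predicates are reachable through a CodeGenModule &CGM and that ResultType is the message's QualType return type; the actual selection logic lives in the ObjC codegen:

  bool UsesFPRet  = CGM.ReturnTypeUsesFPRet(ResultType);  // x86-32 FP returns
  bool UsesFP2Ret = CGM.ReturnTypeUsesFP2Ret(ResultType); // _Complex long double
  // A message send would choose objc_msgSend_fpret, objc_msgSend_fp2ret, or
  // plain objc_msgSend based on these two flags.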
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1668
bool hasFlexibleArrayMember() const
Definition: Decl.h:3436
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3426
CXXCtorType
C++ constructor types.
Definition: ABI.h:25
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
Definition: CGCall.cpp:607
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:533
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1107
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:276
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:59
const TargetInfo & getTarget() const
Definition: CodeGenTypes.h:176
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
const CGCalleeInfo & getAbstractInfo() const
Definition: CGCall.h:153
Dataflow Directional Tag Classes.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2343
uint64_t SanitizerMask
Definition: Sanitizers.h:24
ExtInfo getExtInfo() const
Definition: Type.h:3115
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:93
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Definition: ASTMatchers.h:2154
CodeGenFunction::ComplexPairTy ComplexPairTy
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
Definition: CGBuilder.h:172
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:92
LValue Source
The original argument.
Definition: CGCall.h:190
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type...
Definition: CGCall.cpp:419
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args)
Emits a call or invoke to the given noreturn runtime function.
Definition: CGCall.cpp:3626
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:91
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:988
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP, const FunctionDecl *FD)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:167
void EmitARCIntrinsicUse(ArrayRef< llvm::Value *> values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition: CGObjC.cpp:1803
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:70
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change...
Definition: TargetCXXABI.h:216
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2033
SourceLocation getLocStart() const LLVM_READONLY
Definition: Decl.h:696
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:783
CallingConv getDefaultCallingConvention(bool isVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:108
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
Definition: TargetInfo.cpp:388
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
static void getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad, SmallVectorImpl< llvm::OperandBundleDef > &BundleList)
Definition: CGCall.cpp:3597
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isVolatileQualified() const
Definition: CGValue.h:56
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:3839
Complex values, per C99 6.2.5p11.
Definition: Type.h:2162
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:77
static bool classof(const OMPClause *T)
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:1941
QualType getCanonicalTypeInternal() const
Definition: Type.h:2043
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Definition: CGCXXABI.h:128
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:5985
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable &#39;self&#39;, remove it.
Definition: CGCall.cpp:2630
CharUnits getIndirectAlign() const
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:44
T * getAttr() const
Definition: DeclBase.h:518
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:52
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:640
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:120
llvm::Value * getAggregatePointer() const
Definition: CGValue.h:76
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:62
Expand - Only valid for aggregate argument types.
const CGFunctionInfo & arrangeMSMemberPointerThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes &#39;this&#39; as the first parameter followed by varargs.
Definition: CGCall.cpp:516
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:2358
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type *>::iterator &TI)
getExpandedTypes - Expand the type Ty into the sequence of LLVM types it is passed as.
Definition: CGCall.cpp:966
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:889
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:436
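createBasicBlock (listed earlier) and EmitBlock usually appear as a pair; a small sketch, with CGF and an llvm::Value *Cond assumed in scope:

  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
  llvm::BasicBlock *ContBB = CGF.createBasicBlock("cont");
  CGF.Builder.CreateCondBr(Cond, ThenBB, ContBB);

  CGF.EmitBlock(ThenBB); // append "then" and move the insertion point there
  // ... emit the then-branch here ...
  CGF.EmitBlock(ContBB); // falls through into "cont"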
Represents a base class of a C++ class.
Definition: DeclCXX.h:157
char __ovld __cnfn max(char x, char y)
Returns y if x < y, otherwise it returns x.
RValue asAggregateRValue() const
Definition: CGValue.h:432
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:1961
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:1951
ASTContext & getContext() const
Definition: CodeGenTypes.h:174
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:133
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:505
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl...
Definition: CGCall.cpp:1652
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g., it is a signed integer type or a vector.
Definition: Type.cpp:1795
Represents a C++ struct/union/class.
Definition: DeclCXX.h:266
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(const CXXMethodDecl *MD)
Get the type of the implicit "this" parameter used by a method.
Definition: CGCXXABI.h:329
bool isVoidType() const
Definition: Type.h:5963
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2181
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:5570
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1107
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
This class is used for builtin types like 'int'.
Definition: Type.h:2082
bool isVariadic() const
Definition: DeclObjC.h:418
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Definition: ExprObjC.h:1494
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1623
Copying closure variant of a ctor.
Definition: ABI.h:29
Defines the clang::TargetInfo interface.
StringRef getName() const
getName - Get the name of the identifier for this declaration as a StringRef.
Definition: Decl.h:237
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows)
Definition: CGCall.cpp:194
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
Definition: CGCall.cpp:526
CanQualType IntTy
Definition: ASTContext.h:973
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
Definition: DeclCXX.h:3181
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
Definition: CGCall.cpp:3129
const FunctionProtoType * getCalleeFunctionProtoType() const
Definition: CGCall.h:59
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
static RValue get(llvm::Value *V)
Definition: CGValue.h:86
bool isUnion() const
Definition: Decl.h:3058
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional, const FunctionDecl *FD)
Compute the arguments required by the given formal prototype, given that there may be some additional...
bool isPointerType() const
Definition: Type.h:5766
unsigned getNumRequiredArgs() const
unsigned getDirectOffset() const
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments...
Definition: CGCall.cpp:597
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
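The ASTContext size and alignment queries scattered through this index (getTypeSize, getTypeSizeInChars, getTypeAlignInChars) differ only in units; a sketch assuming an ASTContext &Ctx and a complete QualType T:

  uint64_t Bits = Ctx.getTypeSize(T);                  // size in bits
  clang::CharUnits Chars = Ctx.getTypeSizeInChars(T);  // size in characters
  clang::CharUnits Align = Ctx.getTypeAlignInChars(T); // ABI alignment
  // For a complete type, Bits == Chars.getQuantity() * Ctx.getCharWidth().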
CXXCtorType toCXXCtorType(StructorType T)
Definition: CodeGenTypes.h:65
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
QualType getType() const
Definition: Decl.h:602
static RValue getAggregate(Address addr, bool isVolatile=false)
Definition: CGValue.h:107
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
LValue - This represents an lvalue reference.
Definition: CGValue.h:172
An abstract representation of regular/ObjC call/message targets.
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:147
RValue asRValue() const
Definition: CGValue.h:575
llvm::Type * getCoerceToType() const
Notes how many arguments were added to the beginning (Prefix) and ending (Suffix) of an arg list...
Definition: CGCXXABI.h:299
unsigned getTargetAddressSpace(QualType T) const
Definition: ASTContext.h:2358
void AddDefaultFnAttrs(llvm::Function &F)
Adds attributes to F according to our CodeGenOptions and LangOptions, as though we had emitted it our...
Definition: CGCall.cpp:1772
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:182
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:662
Represents the canonical version of C arrays with a specified constant size.
Definition: Type.h:2551
Abstract information about a function or function prototype.
Definition: CGCall.h:44
A class which abstracts out some details necessary for making a call.
Definition: Type.h:2989
bool isScalar() const
Definition: CGValue.h:52
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::Instruction **callOrInvoke=nullptr)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
Definition: CGCall.cpp:3708
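Putting several of the entries above together, a hedged end-to-end sketch of emitting a direct call: build a CallArgList, arrange the CGFunctionInfo, and hand both to EmitCall. CGF, CGM, Ctx, FnTy (a const FunctionType *), FnPtr (an llvm::Constant * for the callee), and ArgVal are all assumed to be in scope, and CGCallee::forDirect is assumed to be the usual wrapper for a direct callee:

  CallArgList Args;
  Args.add(RValue::get(ArgVal), Ctx.IntTy); // one plain int argument

  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeFreeFunctionCall(Args, FnTy, /*ChainCall=*/false);
  CGCallee Callee = CGCallee::forDirect(FnPtr);
  RValue Result = CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);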
This parameter (which must have pointer type) is a Swift indirect result parameter.
ConstructorUsingShadowDecl * getShadowDecl() const
Definition: DeclCXX.h:2330
ArrayRef< ParmVarDecl * > parameters() const
Definition: DeclObjC.h:371