//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

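// For illustration (hypothetical declaration, not from this file): a function
// declared as
//   void __attribute__((fastcall)) f(int);
// reaches this switch as CC_X86FastCall and is emitted with
// llvm::CallingConv::X86_FastCall, i.e. "define x86_fastcallcc void @f(i32)".
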
/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

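// For illustration: for 'struct S { void f(); };' the 'this' type derived
// here is plain 'S *'. If the method carries an address-space qualifier (as
// in OpenCL C++), the record type is qualified first, so 'this' becomes a
// pointer into that address space; CVR qualifiers are deliberately dropped.
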
/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

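// For illustration (hypothetical case): a call with one ABI prefix argument,
// a two-parameter prototype, and one trailing variadic argument
// (totalArgs == 4) produces
//   { default, info(param0), info(param1), default }
// assuming neither parameter carries pass_object_size.
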
/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

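// For illustration (hypothetical prototype): a parameter declared as
//   void f(void *p __attribute__((pass_object_size(0))));
// contributes two entries to 'prefix': the pointer itself plus an implicit
// size_t carrying the object size, so one formal parameter becomes two
// IR-level arguments.
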
/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

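// For illustration (hypothetical declaration): given
//   void g() __attribute__((preserve_most));
// this helper returns CC_PreserveMost, which ClangCallConvToLLVMCallConv
// above then maps to llvm::CallingConv::PreserveMost.
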
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

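// For illustration (hypothetical hierarchy): with
//   struct A { A(int); };
//   struct B : virtual A { using A::A; };
// the base-object variant of B's inheriting constructor never constructs the
// virtual A, so on ABIs with constructor variants it needs no forwarded
// parameters; the complete-object variant still does.
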
const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(MD, Type, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

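// For illustration: on ABIs where HasThisReturn() is true (e.g. ARM's C++
// ABI, where constructors return 'this'), resultType above is the 'this'
// pointer type rather than void, so a constructor is arranged roughly as
//   S *ctor(S *this, ...)   // sketch, not actual emitted code
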
static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

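// For illustration: for a variadic prototype such as
//   int printf(const char *, ...);
// 'required' becomes RequiredArgs::forPrototypePlus(proto, 0), i.e. one fixed
// argument, and every further entry in 'args' is treated as variadic.
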
/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

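// For illustration (hypothetical type): for
//   struct P { int xy[2]; _Complex float c; };
// the expansion size is 2 (array elements) + 2 (real and imaginary halves)
// = 4 scalar IR arguments.
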
void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

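// For illustration: asking for 4 bytes through a pointer to the (hypothetical)
// LLVM type { { i32, i8 }, i8 } dives twice, first into { i32, i8 } (store
// size 5 >= 4), then into the leading i32 (store size 4 >= 4), and stops
// there because i32 is not a struct.
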
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

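// For illustration: coercing an i64 to i32 on a big-endian target first
// shifts right by 32, so the bytes that sit lowest in memory survive, exactly
// as a store of the i64 followed by an i32 load would behave; little-endian
// targets get a plain truncation instead.
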
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
  Address SrcCasted = CGF.Builder.CreateElementBitCast(Src, CGF.Int8Ty);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
    Address DstCasted = CGF.Builder.CreateElementBitCast(Dst, CGF.Int8Ty);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

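// For illustration (hypothetical layout): for a function whose return is
// ABIArgInfo::Indirect and whose single Clang argument expands to three
// scalars, the mapping is IR arg 0 = sret pointer and IR args 1-3 = Clang
// arg 0; the exact numbering depends on the target ABI (e.g. sret-after-this).
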
/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  const auto &RI = FI.getReturnInfo();
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is alloca addr space.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo(
          CGM.getDataLayout().getAllocaAddrSpace());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

1677 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1678  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1679  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1680 
1681  if (!isFuncTypeConvertible(FPT))
1682  return llvm::StructType::get(getLLVMContext());
1683 
1684  const CGFunctionInfo *Info;
1685  if (isa<CXXDestructorDecl>(MD))
1686  Info =
1687  &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1688  else
1689  Info = &arrangeCXXMethodDeclaration(MD);
1690  return GetFunctionType(*Info);
1691 }
1692 
1693 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1694  llvm::AttrBuilder &FuncAttrs,
1695  const FunctionProtoType *FPT) {
1696  if (!FPT)
1697  return;
1698 
1699  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1700  FPT->isNothrow())
1701  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1702 }
1703 
1704 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1705  bool AttrOnCallSite,
1706  llvm::AttrBuilder &FuncAttrs) {
1707  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1708  if (!HasOptnone) {
1709  if (CodeGenOpts.OptimizeSize)
1710  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1711  if (CodeGenOpts.OptimizeSize == 2)
1712  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1713  }
1714 
1715  if (CodeGenOpts.DisableRedZone)
1716  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1717  if (CodeGenOpts.IndirectTlsSegRefs)
1718  FuncAttrs.addAttribute("indirect-tls-seg-refs");
1719  if (CodeGenOpts.NoImplicitFloat)
1720  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1721 
1722  if (AttrOnCallSite) {
1723  // Attributes that should go on the call site only.
1724  if (!CodeGenOpts.SimplifyLibCalls ||
1725  CodeGenOpts.isNoBuiltinFunc(Name.data()))
1726  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1727  if (!CodeGenOpts.TrapFuncName.empty())
1728  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1729  } else {
1730  // Attributes that should go on the function, but not the call site.
1731  if (!CodeGenOpts.DisableFPElim) {
1732  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1733  } else if (CodeGenOpts.OmitLeafFramePointer) {
1734  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1735  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1736  } else {
1737  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1738  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1739  }
1740 
1741  FuncAttrs.addAttribute("less-precise-fpmad",
1742  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1743 
1744  if (CodeGenOpts.NullPointerIsValid)
1745  FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1746  if (!CodeGenOpts.FPDenormalMode.empty())
1747  FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1748 
1749  FuncAttrs.addAttribute("no-trapping-math",
1750  llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1751 
1752  // Strict (compliant) code is the default, so only add this attribute to
1753  // indicate that we are trying to work around a problem case.
1754  if (!CodeGenOpts.StrictFloatCastOverflow)
1755  FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1756 
1757  // TODO: Are these all needed?
1758  // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1759  FuncAttrs.addAttribute("no-infs-fp-math",
1760  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1761  FuncAttrs.addAttribute("no-nans-fp-math",
1762  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1763  FuncAttrs.addAttribute("unsafe-fp-math",
1764  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1765  FuncAttrs.addAttribute("use-soft-float",
1766  llvm::toStringRef(CodeGenOpts.SoftFloat));
1767  FuncAttrs.addAttribute("stack-protector-buffer-size",
1768  llvm::utostr(CodeGenOpts.SSPBufferSize));
1769  FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1770  llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1771  FuncAttrs.addAttribute(
1772  "correctly-rounded-divide-sqrt-fp-math",
1773  llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1774 
1775  if (getLangOpts().OpenCL)
1776  FuncAttrs.addAttribute("denorms-are-zero",
1777  llvm::toStringRef(CodeGenOpts.FlushDenorm));
1778 
1779  // TODO: Reciprocal estimate codegen options should apply to instructions?
1780  const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1781  if (!Recips.empty())
1782  FuncAttrs.addAttribute("reciprocal-estimates",
1783  llvm::join(Recips, ","));
1784 
1785  if (!CodeGenOpts.PreferVectorWidth.empty() &&
1786  CodeGenOpts.PreferVectorWidth != "none")
1787  FuncAttrs.addAttribute("prefer-vector-width",
1788  CodeGenOpts.PreferVectorWidth);
1789 
1790  if (CodeGenOpts.StackRealignment)
1791  FuncAttrs.addAttribute("stackrealign");
1792  if (CodeGenOpts.Backchain)
1793  FuncAttrs.addAttribute("backchain");
1794 
1795  if (CodeGenOpts.SpeculativeLoadHardening)
1796  FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1797  }
1798 
1799  if (getLangOpts().assumeFunctionsAreConvergent()) {
1800  // Conservatively, mark all functions and calls in CUDA and OpenCL as
1801  // convergent (meaning, they may call an intrinsically convergent op, such
1802  // as __syncthreads() / barrier(), and so can't have certain optimizations
1803  // applied around them). LLVM will remove this attribute where it safely
1804  // can.
1805  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1806  }
1807 
1808  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1809  // Exceptions aren't supported in CUDA device code.
1810  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1811 
1812  // Respect -fcuda-flush-denormals-to-zero.
1813  if (CodeGenOpts.FlushDenorm)
1814  FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1815  }
1816 
1817  for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1818  StringRef Var, Value;
1819  std::tie(Var, Value) = Attr.split('=');
1820  FuncAttrs.addAttribute(Var, Value);
1821  }
1822 }
1823 
1824 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1825  llvm::AttrBuilder FuncAttrs;
1826  ConstructDefaultFnAttrList(F.getName(),
1827  F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1828  /* AttrOnCallsite = */ false, FuncAttrs);
1829  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1830 }
1831 
1832 void CodeGenModule::ConstructAttributeList(
1833  StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1834  llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1835  llvm::AttrBuilder FuncAttrs;
1836  llvm::AttrBuilder RetAttrs;
1837 
1838  CallingConv = FI.getEffectiveCallingConvention();
1839  if (FI.isNoReturn())
1840  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1841 
1842  // If we have information about the function prototype, we can learn
1843  // attributes from there.
1844  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1845  CalleeInfo.getCalleeFunctionProtoType());
1846 
1847  const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1848 
1849  bool HasOptnone = false;
1850  // FIXME: handle sseregparm someday...
1851  if (TargetDecl) {
1852  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1853  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1854  if (TargetDecl->hasAttr<NoThrowAttr>())
1855  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1856  if (TargetDecl->hasAttr<NoReturnAttr>())
1857  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1858  if (TargetDecl->hasAttr<ColdAttr>())
1859  FuncAttrs.addAttribute(llvm::Attribute::Cold);
1860  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1861  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1862  if (TargetDecl->hasAttr<ConvergentAttr>())
1863  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1864 
1865  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1866  AddAttributesFromFunctionProtoType(
1867  getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1868  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1869  // These attributes are not inherited by overloads.
1870  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1871  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1872  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1873  }
1874 
1875  // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1876  if (TargetDecl->hasAttr<ConstAttr>()) {
1877  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1878  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1879  } else if (TargetDecl->hasAttr<PureAttr>()) {
1880  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1881  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1882  } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1883  FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1884  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1885  }
1886  if (TargetDecl->hasAttr<RestrictAttr>())
1887  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1888  if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1889  !CodeGenOpts.NullPointerIsValid)
1890  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1891  if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1892  FuncAttrs.addAttribute("no_caller_saved_registers");
1893  if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1894  FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1895 
1896  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1897  if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1898  Optional<unsigned> NumElemsParam;
1899  if (AllocSize->getNumElemsParam().isValid())
1900  NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1901  FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1902  NumElemsParam);
1903  }
1904  }
1905 
1906  ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1907 
1908  // This must run after constructing the default function attribute list
1909  // to ensure that the speculative load hardening attribute is removed
1910  // in the case where the -mspeculative-load-hardening flag was passed.
1911  if (TargetDecl) {
1912  if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
1913  FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
1914  if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1915  FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1916  }
1917 
1918  if (CodeGenOpts.EnableSegmentedStacks &&
1919  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1920  FuncAttrs.addAttribute("split-stack");
1921 
1922  // Add NonLazyBind attribute to function declarations when -fno-plt
1923  // is used.
1924  if (TargetDecl && CodeGenOpts.NoPLT) {
1925  if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1926  if (!Fn->isDefined() && !AttrOnCallSite) {
1927  FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1928  }
1929  }
1930  }
1931 
1932  if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1933  if (getLangOpts().OpenCLVersion <= 120) {
1934  // OpenCL v1.2: work groups are always uniform.
1935  FuncAttrs.addAttribute("uniform-work-group-size", "true");
1936  } else {
1937  // OpenCL v2.0: work groups may or may not be uniform. The
1938  // '-cl-uniform-work-group-size' compile option is a hint to the
1939  // compiler that the global work-size is a multiple of
1940  // the work-group size specified to clEnqueueNDRangeKernel
1941  // (i.e. work groups are uniform).
1942  FuncAttrs.addAttribute("uniform-work-group-size",
1943  llvm::toStringRef(CodeGenOpts.UniformWGSize));
1944  }
1945  }
1946 
1947  if (!AttrOnCallSite) {
1948  bool DisableTailCalls = false;
1949 
1950  if (CodeGenOpts.DisableTailCalls)
1951  DisableTailCalls = true;
1952  else if (TargetDecl) {
1953  if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1954  TargetDecl->hasAttr<AnyX86InterruptAttr>())
1955  DisableTailCalls = true;
1956  else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1957  if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1958  if (!BD->doesNotEscape())
1959  DisableTailCalls = true;
1960  }
1961  }
1962 
1963  FuncAttrs.addAttribute("disable-tail-calls",
1964  llvm::toStringRef(DisableTailCalls));
1965  GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1966  }
1967 
1968  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1969 
1970  QualType RetTy = FI.getReturnType();
1971  const ABIArgInfo &RetAI = FI.getReturnInfo();
1972  switch (RetAI.getKind()) {
1973  case ABIArgInfo::Extend:
1974  if (RetAI.isSignExt())
1975  RetAttrs.addAttribute(llvm::Attribute::SExt);
1976  else
1977  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1978  LLVM_FALLTHROUGH;
1979  case ABIArgInfo::Direct:
1980  if (RetAI.getInReg())
1981  RetAttrs.addAttribute(llvm::Attribute::InReg);
1982  break;
1983  case ABIArgInfo::Ignore:
1984  break;
1985 
1986  case ABIArgInfo::InAlloca:
1987  case ABIArgInfo::Indirect: {
1988  // inalloca and sret disable readnone and readonly
1989  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1990  .removeAttribute(llvm::Attribute::ReadNone);
1991  break;
1992  }
1993 
1994  case ABIArgInfo::CoerceAndExpand:
1995  break;
1996 
1997  case ABIArgInfo::Expand:
1998  llvm_unreachable("Invalid ABI kind for return argument");
1999  }
2000 
2001  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2002  QualType PTy = RefTy->getPointeeType();
2003  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2004  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2005  .getQuantity());
2006  else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2007  !CodeGenOpts.NullPointerIsValid)
2008  RetAttrs.addAttribute(llvm::Attribute::NonNull);
2009  }
2010 
2011  bool hasUsedSRet = false;
2012  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2013 
2014  // Attach attributes to sret.
2015  if (IRFunctionArgs.hasSRetArg()) {
2016  llvm::AttrBuilder SRETAttrs;
2017  if (!RetAI.getSuppressSRet())
2018  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2019  hasUsedSRet = true;
2020  if (RetAI.getInReg())
2021  SRETAttrs.addAttribute(llvm::Attribute::InReg);
2022  ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2023  llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2024  }
2025 
2026  // Attach attributes to inalloca argument.
2027  if (IRFunctionArgs.hasInallocaArg()) {
2028  llvm::AttrBuilder Attrs;
2029  Attrs.addAttribute(llvm::Attribute::InAlloca);
2030  ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2031  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2032  }
2033 
2034  unsigned ArgNo = 0;
2035  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2036  E = FI.arg_end();
2037  I != E; ++I, ++ArgNo) {
2038  QualType ParamType = I->type;
2039  const ABIArgInfo &AI = I->info;
2040  llvm::AttrBuilder Attrs;
2041 
2042  // Add attribute for padding argument, if necessary.
2043  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2044  if (AI.getPaddingInReg()) {
2045  ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2046  llvm::AttributeSet::get(
2047  getLLVMContext(),
2048  llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2049  }
2050  }
2051 
2052  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2053  // have the corresponding parameter variable. It doesn't make
2054  // sense to do it here because parameters are so messed up.
2055  switch (AI.getKind()) {
2056  case ABIArgInfo::Extend:
2057  if (AI.isSignExt())
2058  Attrs.addAttribute(llvm::Attribute::SExt);
2059  else
2060  Attrs.addAttribute(llvm::Attribute::ZExt);
2061  LLVM_FALLTHROUGH;
2062  case ABIArgInfo::Direct:
2063  if (ArgNo == 0 && FI.isChainCall())
2064  Attrs.addAttribute(llvm::Attribute::Nest);
2065  else if (AI.getInReg())
2066  Attrs.addAttribute(llvm::Attribute::InReg);
2067  break;
2068 
2069  case ABIArgInfo::Indirect: {
2070  if (AI.getInReg())
2071  Attrs.addAttribute(llvm::Attribute::InReg);
2072 
2073  if (AI.getIndirectByVal())
2074  Attrs.addAttribute(llvm::Attribute::ByVal);
2075 
2076  CharUnits Align = AI.getIndirectAlign();
2077 
2078  // In a byval argument, it is important that the required
2079  // alignment of the type is honored, as LLVM might be creating a
2080  // *new* stack object, and needs to know what alignment to give
2081  // it. (Sometimes it can deduce a sensible alignment on its own,
2082  // but not if clang decides it must emit a packed struct, or the
2083  // user specifies increased alignment requirements.)
2084  //
2085  // This is different from indirect *not* byval, where the object
2086  // exists already, and the align attribute is purely
2087  // informative.
2088  assert(!Align.isZero());
2089 
2090  // For now, only add this when we have a byval argument.
2091  // TODO: be less lazy about updating test cases.
2092  if (AI.getIndirectByVal())
2093  Attrs.addAlignmentAttr(Align.getQuantity());
2094 
2095  // byval disables readnone and readonly.
2096  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2097  .removeAttribute(llvm::Attribute::ReadNone);
2098  break;
2099  }
2100  case ABIArgInfo::Ignore:
2101  case ABIArgInfo::Expand:
2102  case ABIArgInfo::CoerceAndExpand:
2103  break;
2104 
2105  case ABIArgInfo::InAlloca:
2106  // inalloca disables readnone and readonly.
2107  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2108  .removeAttribute(llvm::Attribute::ReadNone);
2109  continue;
2110  }
2111 
2112  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2113  QualType PTy = RefTy->getPointeeType();
2114  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2115  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2116  .getQuantity());
2117  else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2118  !CodeGenOpts.NullPointerIsValid)
2119  Attrs.addAttribute(llvm::Attribute::NonNull);
2120  }
2121 
2122  switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2123  case ParameterABI::Ordinary:
2124  break;
2125 
2126  case ParameterABI::SwiftIndirectResult: {
2127  // Add 'sret' if we haven't already used it for something, but
2128  // only if the result is void.
2129  if (!hasUsedSRet && RetTy->isVoidType()) {
2130  Attrs.addAttribute(llvm::Attribute::StructRet);
2131  hasUsedSRet = true;
2132  }
2133 
2134  // Add 'noalias' in either case.
2135  Attrs.addAttribute(llvm::Attribute::NoAlias);
2136 
2137  // Add 'dereferenceable' and 'alignment'.
2138  auto PTy = ParamType->getPointeeType();
2139  if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2140  auto info = getContext().getTypeInfoInChars(PTy);
2141  Attrs.addDereferenceableAttr(info.first.getQuantity());
2142  Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2143  info.second.getQuantity()));
2144  }
2145  break;
2146  }
2147 
2148  case ParameterABI::SwiftErrorResult:
2149  Attrs.addAttribute(llvm::Attribute::SwiftError);
2150  break;
2151 
2152  case ParameterABI::SwiftContext:
2153  Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2154  break;
2155  }
2156 
2157  if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2158  Attrs.addAttribute(llvm::Attribute::NoCapture);
2159 
2160  if (Attrs.hasAttributes()) {
2161  unsigned FirstIRArg, NumIRArgs;
2162  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2163  for (unsigned i = 0; i < NumIRArgs; i++)
2164  ArgAttrs[FirstIRArg + i] =
2165  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2166  }
2167  }
2168  assert(ArgNo == FI.arg_size());
2169 
2170  AttrList = llvm::AttributeList::get(
2171  getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2172  llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2173 }
2174 
2175 /// An argument came in as a promoted argument; demote it back to its
2176 /// declared type.
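/// (Example: a K&R-promoted 'float' parameter arrives as a double and is
/// truncated back to float here; the resulting cast is named 'arg.unpromote'.)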
2177 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2178  const VarDecl *var,
2179  llvm::Value *value) {
2180  llvm::Type *varType = CGF.ConvertType(var->getType());
2181 
2182  // This can happen with promotions that actually don't change the
2183  // underlying type, like the enum promotions.
2184  if (value->getType() == varType) return value;
2185 
2186  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2187  && "unexpected promotion type");
2188 
2189  if (isa<llvm::IntegerType>(varType))
2190  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2191 
2192  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2193 }
2194 
2195 /// Returns the attribute (either parameter attribute or function
2196 /// attribute) that declares argument ArgNo to be non-null.
2197 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2198  QualType ArgType, unsigned ArgNo) {
2199  // FIXME: __attribute__((nonnull)) can also be applied to:
2200  // - references to pointers, where the pointee is known to be
2201  // nonnull (apparently a Clang extension)
2202  // - transparent unions containing pointers
2203  // In the former case, LLVM IR cannot represent the constraint. In
2204  // the latter case, we have no guarantee that the transparent union
2205  // is in fact passed as a pointer.
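  // (Example: 'void f(int *p) __attribute__((nonnull(1)))' is a
  // function-level attribute naming parameter 1; a bare
  // __attribute__((nonnull)) on the parameter itself is handled by the
  // parameter check just below.)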
2206  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2207  return nullptr;
2208  // First, check attribute on parameter itself.
2209  if (PVD) {
2210  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2211  return ParmNNAttr;
2212  }
2213  // Check function attributes.
2214  if (!FD)
2215  return nullptr;
2216  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2217  if (NNAttr->isNonNull(ArgNo))
2218  return NNAttr;
2219  }
2220  return nullptr;
2221 }
2222 
2223 namespace {
2224  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2225  Address Temp;
2226  Address Arg;
2227  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2228  void Emit(CodeGenFunction &CGF, Flags flags) override {
2229  llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2230  CGF.Builder.CreateStore(errorValue, Arg);
2231  }
2232  };
2233 }
2234 
2235 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2236  llvm::Function *Fn,
2237  const FunctionArgList &Args) {
2238  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2239  // Naked functions don't have prologues.
2240  return;
2241 
2242  // If this is an implicit-return-zero function, go ahead and
2243  // initialize the return value. TODO: it might be nice to have
2244  // a more general mechanism for this that didn't require synthesized
2245  // return statements.
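  // (Example: 'main' in C and C++ has an implicit 'return 0', so it takes
  // this path when control falls off the end of the function.)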
2246  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2247  if (FD->hasImplicitReturnZero()) {
2248  QualType RetTy = FD->getReturnType().getUnqualifiedType();
2249  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2250  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2251  Builder.CreateStore(Zero, ReturnValue);
2252  }
2253  }
2254 
2255  // FIXME: We no longer need the types from FunctionArgList; lift up and
2256  // simplify.
2257 
2258  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2259  // Flattened function arguments.
2260  SmallVector<llvm::Value *, 16> FnArgs;
2261  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2262  for (auto &Arg : Fn->args()) {
2263  FnArgs.push_back(&Arg);
2264  }
2265  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2266 
2267  // If we're using inalloca, all the memory arguments are GEPs off of the last
2268  // parameter, which is a pointer to the complete memory area.
2269  Address ArgStruct = Address::invalid();
2270  if (IRFunctionArgs.hasInallocaArg()) {
2271  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2272  FI.getArgStructAlignment());
2273 
2274  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2275  }
2276 
2277  // Name the struct return parameter.
2278  if (IRFunctionArgs.hasSRetArg()) {
2279  auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2280  AI->setName("agg.result");
2281  AI->addAttr(llvm::Attribute::NoAlias);
2282  }
2283 
2284  // Track if we received the parameter as a pointer (indirect, byval, or
2285  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2286  // into a local alloca for us.
2287  SmallVector<ParamValue, 16> ArgVals;
2288  ArgVals.reserve(Args.size());
2289 
2290  // Create a pointer value for every parameter declaration. This usually
2291  // entails copying one or more LLVM IR arguments into an alloca. Don't push
2292  // any cleanups or do anything that might unwind. We do that separately, so
2293  // we can push the cleanups in the correct order for the ABI.
2294  assert(FI.arg_size() == Args.size() &&
2295  "Mismatch between function signature & arguments.");
2296  unsigned ArgNo = 0;
2297  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2298  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2299  i != e; ++i, ++info_it, ++ArgNo) {
2300  const VarDecl *Arg = *i;
2301  const ABIArgInfo &ArgI = info_it->info;
2302 
2303  bool isPromoted =
2304  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2305  // We are converting from ABIArgInfo type to VarDecl type directly, unless
2306  // the parameter is promoted. In this case we convert to
2307  // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2308  QualType Ty = isPromoted ? info_it->type : Arg->getType();
2309  assert(hasScalarEvaluationKind(Ty) ==
2310  hasScalarEvaluationKind(Arg->getType()));
2311 
2312  unsigned FirstIRArg, NumIRArgs;
2313  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2314 
2315  switch (ArgI.getKind()) {
2316  case ABIArgInfo::InAlloca: {
2317  assert(NumIRArgs == 0);
2318  auto FieldIndex = ArgI.getInAllocaFieldIndex();
2319  Address V =
2320  Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2321  ArgVals.push_back(ParamValue::forIndirect(V));
2322  break;
2323  }
2324 
2325  case ABIArgInfo::Indirect: {
2326  assert(NumIRArgs == 1);
2327  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2328 
2329  if (!hasScalarEvaluationKind(Ty)) {
2330  // Aggregates and complex variables are accessed by reference. All we
2331  // need to do is realign the value, if requested.
2332  Address V = ParamAddr;
2333  if (ArgI.getIndirectRealign()) {
2334  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2335 
2336  // Copy from the incoming argument pointer to the temporary with the
2337  // appropriate alignment.
2338  //
2339  // FIXME: We should have a common utility for generating an aggregate
2340  // copy.
2341  CharUnits Size = getContext().getTypeSizeInChars(Ty);
2342  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2343  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2344  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2345  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2346  V = AlignedTemp;
2347  }
2348  ArgVals.push_back(ParamValue::forIndirect(V));
2349  } else {
2350  // Load scalar value from indirect argument.
2351  llvm::Value *V =
2352  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2353 
2354  if (isPromoted)
2355  V = emitArgumentDemotion(*this, Arg, V);
2356  ArgVals.push_back(ParamValue::forDirect(V));
2357  }
2358  break;
2359  }
2360 
2361  case ABIArgInfo::Extend:
2362  case ABIArgInfo::Direct: {
2363 
2364  // If we have the trivial case, handle it with no muss and fuss.
2365  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2366  ArgI.getCoerceToType() == ConvertType(Ty) &&
2367  ArgI.getDirectOffset() == 0) {
2368  assert(NumIRArgs == 1);
2369  llvm::Value *V = FnArgs[FirstIRArg];
2370  auto AI = cast<llvm::Argument>(V);
2371 
2372  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2373  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2374  PVD->getFunctionScopeIndex()) &&
2375  !CGM.getCodeGenOpts().NullPointerIsValid)
2376  AI->addAttr(llvm::Attribute::NonNull);
2377 
2378  QualType OTy = PVD->getOriginalType();
2379  if (const auto *ArrTy =
2380  getContext().getAsConstantArrayType(OTy)) {
2381  // A C99 array parameter declaration with the static keyword also
2382  // indicates dereferenceability, and if the size is constant we can
2383  // use the dereferenceable attribute (which requires the size in
2384  // bytes).
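  // (Example: for 'void f(int a[static 4])' on a target with 4-byte int,
  // the argument can be marked dereferenceable(16).)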
2385  if (ArrTy->getSizeModifier() == ArrayType::Static) {
2386  QualType ETy = ArrTy->getElementType();
2387  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2388  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2389  ArrSize) {
2390  llvm::AttrBuilder Attrs;
2391  Attrs.addDereferenceableAttr(
2392  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2393  AI->addAttrs(Attrs);
2394  } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2395  !CGM.getCodeGenOpts().NullPointerIsValid) {
2396  AI->addAttr(llvm::Attribute::NonNull);
2397  }
2398  }
2399  } else if (const auto *ArrTy =
2400  getContext().getAsVariableArrayType(OTy)) {
2401  // For C99 VLAs with the static keyword, we don't know the size so
2402  // we can't use the dereferenceable attribute, but in addrspace(0)
2403  // we know that it must be nonnull.
2404  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2405  !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2406  !CGM.getCodeGenOpts().NullPointerIsValid)
2407  AI->addAttr(llvm::Attribute::NonNull);
2408  }
2409 
2410  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2411  if (!AVAttr)
2412  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2413  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2414  if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2415  // If alignment-assumption sanitizer is enabled, we do *not* add
2416  // alignment attribute here, but emit normal alignment assumption,
2417  // so the UBSAN check could function.
2418  llvm::Value *AlignmentValue =
2419  EmitScalarExpr(AVAttr->getAlignment());
2420  llvm::ConstantInt *AlignmentCI =
2421  cast<llvm::ConstantInt>(AlignmentValue);
2422  unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2423  +llvm::Value::MaximumAlignment);
2424  AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2425  }
2426  }
2427 
2428  if (Arg->getType().isRestrictQualified())
2429  AI->addAttr(llvm::Attribute::NoAlias);
2430 
2431  // LLVM expects swifterror parameters to be used in very restricted
2432  // ways. Copy the value into a less-restricted temporary.
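  // (LLVM permits a swifterror value only to be loaded, stored, or passed
  // as a swifterror call argument, hence the ordinary temporary here.)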
2433  if (FI.getExtParameterInfo(ArgNo).getABI()
2434  == ParameterABI::SwiftErrorResult) {
2435  QualType pointeeTy = Ty->getPointeeType();
2436  assert(pointeeTy->isPointerType());
2437  Address temp =
2438  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2439  Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2440  llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2441  Builder.CreateStore(incomingErrorValue, temp);
2442  V = temp.getPointer();
2443 
2444  // Push a cleanup to copy the value back at the end of the function.
2445  // The convention does not guarantee that the value will be written
2446  // back if the function exits with an unwind exception.
2447  EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2448  }
2449 
2450  // Ensure the argument is the correct type.
2451  if (V->getType() != ArgI.getCoerceToType())
2452  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2453 
2454  if (isPromoted)
2455  V = emitArgumentDemotion(*this, Arg, V);
2456 
2457  // Because of merging of function types from multiple decls it is
2458  // possible for the type of an argument to not match the corresponding
2459  // type in the function type. Since we are codegening the callee
2460  // in here, add a cast to the argument type.
2461  llvm::Type *LTy = ConvertType(Arg->getType());
2462  if (V->getType() != LTy)
2463  V = Builder.CreateBitCast(V, LTy);
2464 
2465  ArgVals.push_back(ParamValue::forDirect(V));
2466  break;
2467  }
2468 
2469  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2470  Arg->getName());
2471 
2472  // Pointer to store into.
2473  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2474 
2475  // Fast-isel and the optimizer generally like scalar values better than
2476  // FCAs, so we flatten them if this is safe to do for this argument.
2477  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2478  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2479  STy->getNumElements() > 1) {
2480  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2481  llvm::Type *DstTy = Ptr.getElementType();
2482  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2483 
2484  Address AddrToStoreInto = Address::invalid();
2485  if (SrcSize <= DstSize) {
2486  AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2487  } else {
2488  AddrToStoreInto =
2489  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2490  }
2491 
2492  assert(STy->getNumElements() == NumIRArgs);
2493  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2494  auto AI = FnArgs[FirstIRArg + i];
2495  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2496  Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2497  Builder.CreateStore(AI, EltPtr);
2498  }
2499 
2500  if (SrcSize > DstSize) {
2501  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2502  }
2503 
2504  } else {
2505  // Simple case, just do a coerced store of the argument into the alloca.
2506  assert(NumIRArgs == 1);
2507  auto AI = FnArgs[FirstIRArg];
2508  AI->setName(Arg->getName() + ".coerce");
2509  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2510  }
2511 
2512  // Match to what EmitParmDecl is expecting for this type.
2513  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2514  llvm::Value *V =
2515  EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2516  if (isPromoted)
2517  V = emitArgumentDemotion(*this, Arg, V);
2518  ArgVals.push_back(ParamValue::forDirect(V));
2519  } else {
2520  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2521  }
2522  break;
2523  }
2524 
2525  case ABIArgInfo::CoerceAndExpand: {
2526  // Reconstruct into a temporary.
2527  Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2528  ArgVals.push_back(ParamValue::forIndirect(alloca));
2529 
2530  auto coercionType = ArgI.getCoerceAndExpandType();
2531  alloca = Builder.CreateElementBitCast(alloca, coercionType);
2532 
2533  unsigned argIndex = FirstIRArg;
2534  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2535  llvm::Type *eltType = coercionType->getElementType(i);
2536  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2537  continue;
2538 
2539  auto eltAddr = Builder.CreateStructGEP(alloca, i);
2540  auto elt = FnArgs[argIndex++];
2541  Builder.CreateStore(elt, eltAddr);
2542  }
2543  assert(argIndex == FirstIRArg + NumIRArgs);
2544  break;
2545  }
2546 
2547  case ABIArgInfo::Expand: {
2548  // If this structure was expanded into multiple arguments then
2549  // we need to create a temporary and reconstruct it from the
2550  // arguments.
2551  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2552  LValue LV = MakeAddrLValue(Alloca, Ty);
2553  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2554 
2555  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2556  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2557  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2558  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2559  auto AI = FnArgs[FirstIRArg + i];
2560  AI->setName(Arg->getName() + "." + Twine(i));
2561  }
2562  break;
2563  }
2564 
2565  case ABIArgInfo::Ignore:
2566  assert(NumIRArgs == 0);
2567  // Initialize the local variable appropriately.
2568  if (!hasScalarEvaluationKind(Ty)) {
2569  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2570  } else {
2571  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2572  ArgVals.push_back(ParamValue::forDirect(U));
2573  }
2574  break;
2575  }
2576  }
2577 
2578  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2579  for (int I = Args.size() - 1; I >= 0; --I)
2580  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2581  } else {
2582  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2583  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2584  }
2585 }
2586 
2587 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2588  while (insn->use_empty()) {
2589  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2590  if (!bitcast) return;
2591 
2592  // This is "safe" because we would have used a ConstantExpr otherwise.
2593  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2594  bitcast->eraseFromParent();
2595  }
2596 }
2597 
2598 /// Try to emit a fused autorelease of a return result.
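/// (On success the plain objc_retain of the result is folded into a single
/// objc_retainAutoreleaseReturnValue call; see the pattern matching below.)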
2599 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2600  llvm::Value *result) {
2601  // We must be emitting immediately after the instruction that produced 'result'.
2602  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2603  if (BB->empty()) return nullptr;
2604  if (&BB->back() != result) return nullptr;
2605 
2606  llvm::Type *resultType = result->getType();
2607 
2608  // result is in a BasicBlock and is therefore an Instruction.
2609  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2610 
2611  SmallVector<llvm::Instruction *, 4> InstsToKill;
2612 
2613  // Look for:
2614  // %generator = bitcast %type1* %generator2 to %type2*
2615  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2616  // We would have emitted this as a constant if the operand weren't
2617  // an Instruction.
2618  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2619 
2620  // Require the generator to be immediately followed by the cast.
2621  if (generator->getNextNode() != bitcast)
2622  return nullptr;
2623 
2624  InstsToKill.push_back(bitcast);
2625  }
2626 
2627  // Look for:
2628  // %generator = call i8* @objc_retain(i8* %originalResult)
2629  // or
2630  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2631  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2632  if (!call) return nullptr;
2633 
2634  bool doRetainAutorelease;
2635 
2636  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2637  doRetainAutorelease = true;
2638  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2639  .objc_retainAutoreleasedReturnValue) {
2640  doRetainAutorelease = false;
2641 
2642  // If we emitted an assembly marker for this call (and the
2643  // ARCEntrypoints field should have been set if so), go looking
2644  // for that call. If we can't find it, we can't do this
2645  // optimization. But it should always be the immediately previous
2646  // instruction, unless we needed bitcasts around the call.
2647  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2648  llvm::Instruction *prev = call->getPrevNode();
2649  assert(prev);
2650  if (isa<llvm::BitCastInst>(prev)) {
2651  prev = prev->getPrevNode();
2652  assert(prev);
2653  }
2654  assert(isa<llvm::CallInst>(prev));
2655  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2656  CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2657  InstsToKill.push_back(prev);
2658  }
2659  } else {
2660  return nullptr;
2661  }
2662 
2663  result = call->getArgOperand(0);
2664  InstsToKill.push_back(call);
2665 
2666  // Keep killing bitcasts, for sanity. Note that we no longer care
2667  // about precise ordering as long as there's exactly one use.
2668  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2669  if (!bitcast->hasOneUse()) break;
2670  InstsToKill.push_back(bitcast);
2671  result = bitcast->getOperand(0);
2672  }
2673 
2674  // Delete all the unnecessary instructions, from latest to earliest.
2675  for (auto *I : InstsToKill)
2676  I->eraseFromParent();
2677 
2678  // Do the fused retain/autorelease if we were asked to.
2679  if (doRetainAutorelease)
2680  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2681 
2682  // Cast back to the result type.
2683  return CGF.Builder.CreateBitCast(result, resultType);
2684 }
2685 
2686 /// If this is a +1 of the value of an immutable 'self', remove it.
2687 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2688  llvm::Value *result) {
2689  // This is only applicable to a method with an immutable 'self'.
2690  const ObjCMethodDecl *method =
2691  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2692  if (!method) return nullptr;
2693  const VarDecl *self = method->getSelfDecl();
2694  if (!self->getType().isConstQualified()) return nullptr;
2695 
2696  // Look for a retain call.
2697  llvm::CallInst *retainCall =
2698  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2699  if (!retainCall ||
2700  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2701  return nullptr;
2702 
2703  // Look for an ordinary load of 'self'.
2704  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2705  llvm::LoadInst *load =
2706  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2707  if (!load || load->isAtomic() || load->isVolatile() ||
2708  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2709  return nullptr;
2710 
2711  // Okay! Burn it all down. This relies for correctness on the
2712  // assumption that the retain is emitted as part of the return and
2713  // that thereafter everything is used "linearly".
2714  llvm::Type *resultType = result->getType();
2715  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2716  assert(retainCall->use_empty());
2717  retainCall->eraseFromParent();
2718  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2719 
2720  return CGF.Builder.CreateBitCast(load, resultType);
2721 }
2722 
2723 /// Emit an ARC autorelease of the result of a function.
2724 ///
2725 /// \return the value to actually return from the function
2726 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2727  llvm::Value *result) {
2728  // If we're returning 'self', kill the initial retain. This is a
2729  // heuristic attempt to "encourage correctness" in the really unfortunate
2730  // case where we have a return of self during a dealloc and we desperately
2731  // need to avoid the possible autorelease.
2732  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2733  return self;
2734 
2735  // At -O0, try to emit a fused retain/autorelease.
2736  if (CGF.shouldUseFusedARCCalls())
2737  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2738  return fused;
2739 
2740  return CGF.EmitARCAutoreleaseReturnValue(result);
2741 }
2742 
2743 /// Heuristically search for a dominating store to the return-value slot.
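/// (In the common single-store case this lets EmitFunctionEpilog forward the
/// stored value directly, delete the store, and usually delete the
/// ReturnValue alloca as well.)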
2744 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2745  // Check if a User is a store whose pointer operand is the ReturnValue.
2746  // We are looking for stores to the ReturnValue, not for stores of the
2747  // ReturnValue to some other location.
2748  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2749  auto *SI = dyn_cast<llvm::StoreInst>(U);
2750  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2751  return nullptr;
2752  // These aren't actually possible for non-coerced returns, and we
2753  // only care about non-coerced returns on this code path.
2754  assert(!SI->isAtomic() && !SI->isVolatile());
2755  return SI;
2756  };
2757  // If there are multiple uses of the return-value slot, just check
2758  // for something immediately preceding the IP. Sometimes this can
2759  // happen with how we generate implicit-returns; it can also happen
2760  // with noreturn cleanups.
2761  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2762  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2763  if (IP->empty()) return nullptr;
2764  llvm::Instruction *I = &IP->back();
2765 
2766  // Skip lifetime markers
2767  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2768  IE = IP->rend();
2769  II != IE; ++II) {
2770  if (llvm::IntrinsicInst *Intrinsic =
2771  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2772  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2773  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2774  ++II;
2775  if (II == IE)
2776  break;
2777  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2778  continue;
2779  }
2780  }
2781  I = &*II;
2782  break;
2783  }
2784 
2785  return GetStoreIfValid(I);
2786  }
2787 
2788  llvm::StoreInst *store =
2789  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2790  if (!store) return nullptr;
2791 
2792  // Now do a quick-and-dirty dominance check: just walk up the
2793  // single-predecessor chain from the current insertion point.
2794  llvm::BasicBlock *StoreBB = store->getParent();
2795  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2796  while (IP != StoreBB) {
2797  if (!(IP = IP->getSinglePredecessor()))
2798  return nullptr;
2799  }
2800 
2801  // Okay, the store's basic block dominates the insertion point; we
2802  // can do our thing.
2803  return store;
2804 }
2805 
2806 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2807  bool EmitRetDbgLoc,
2808  SourceLocation EndLoc) {
2809  if (FI.isNoReturn()) {
2810  // Noreturn functions don't return.
2811  EmitUnreachable(EndLoc);
2812  return;
2813  }
2814 
2815  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2816  // Naked functions don't have epilogues.
2817  Builder.CreateUnreachable();
2818  return;
2819  }
2820 
2821  // Functions with no result always return void.
2822  if (!ReturnValue.isValid()) {
2823  Builder.CreateRetVoid();
2824  return;
2825  }
2826 
2827  llvm::DebugLoc RetDbgLoc;
2828  llvm::Value *RV = nullptr;
2829  QualType RetTy = FI.getReturnType();
2830  const ABIArgInfo &RetAI = FI.getReturnInfo();
2831 
2832  switch (RetAI.getKind()) {
2833  case ABIArgInfo::InAlloca:
2834  // Aggregates get evaluated directly into the destination. Sometimes we
2835  // need to return the sret value in a register, though.
2836  assert(hasAggregateEvaluationKind(RetTy));
2837  if (RetAI.getInAllocaSRet()) {
2838  llvm::Function::arg_iterator EI = CurFn->arg_end();
2839  --EI;
2840  llvm::Value *ArgStruct = &*EI;
2841  llvm::Value *SRet = Builder.CreateStructGEP(
2842  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2843  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2844  }
2845  break;
2846 
2847  case ABIArgInfo::Indirect: {
2848  auto AI = CurFn->arg_begin();
2849  if (RetAI.isSRetAfterThis())
2850  ++AI;
2851  switch (getEvaluationKind(RetTy)) {
2852  case TEK_Complex: {
2853  ComplexPairTy RT =
2854  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2855  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2856  /*isInit*/ true);
2857  break;
2858  }
2859  case TEK_Aggregate:
2860  // Do nothing; aggregates get evaluated directly into the destination.
2861  break;
2862  case TEK_Scalar:
2863  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2864  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2865  /*isInit*/ true);
2866  break;
2867  }
2868  break;
2869  }
2870 
2871  case ABIArgInfo::Extend:
2872  case ABIArgInfo::Direct:
2873  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2874  RetAI.getDirectOffset() == 0) {
2875  // The internal return value temp always will have pointer-to-return-type
2876  // type, just do a load.
2877 
2878  // If there is a dominating store to ReturnValue, we can elide
2879  // the load, zap the store, and usually zap the alloca.
2880  if (llvm::StoreInst *SI =
2881  findDominatingStoreToReturnValue(*this)) {
2882  // Reuse the debug location from the store unless there is
2883  // cleanup code to be emitted between the store and return
2884  // instruction.
2885  if (EmitRetDbgLoc && !AutoreleaseResult)
2886  RetDbgLoc = SI->getDebugLoc();
2887  // Get the stored value and nuke the now-dead store.
2888  RV = SI->getValueOperand();
2889  SI->eraseFromParent();
2890 
2891  // If that was the only use of the return value, nuke it as well now.
2892  auto returnValueInst = ReturnValue.getPointer();
2893  if (returnValueInst->use_empty()) {
2894  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2895  alloca->eraseFromParent();
2896  ReturnValue = Address::invalid();
2897  }
2898  }
2899 
2900  // Otherwise, we have to do a simple load.
2901  } else {
2902  RV = Builder.CreateLoad(ReturnValue);
2903  }
2904  } else {
2905  // If the value is offset in memory, apply the offset now.
2906  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2907 
2908  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2909  }
2910 
2911  // In ARC, end functions that return a retainable type with a call
2912  // to objc_autoreleaseReturnValue.
2913  if (AutoreleaseResult) {
2914 #ifndef NDEBUG
2915  // Type::isObjCRetainableType has to be called on a QualType that hasn't
2916  // been stripped of the typedefs, so we cannot use RetTy here. Get the
2917  // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2918  // CurCodeDecl or BlockInfo.
2919  QualType RT;
2920 
2921  if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2922  RT = FD->getReturnType();
2923  else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2924  RT = MD->getReturnType();
2925  else if (isa<BlockDecl>(CurCodeDecl))
2926  RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2927  else
2928  llvm_unreachable("Unexpected function/method type");
2929 
2930  assert(getLangOpts().ObjCAutoRefCount &&
2931  !FI.isReturnsRetained() &&
2932  RT->isObjCRetainableType());
2933 #endif
2934  RV = emitAutoreleaseOfResult(*this, RV);
2935  }
2936 
2937  break;
2938 
2939  case ABIArgInfo::Ignore:
2940  break;
2941 
2942  case ABIArgInfo::CoerceAndExpand: {
2943  auto coercionType = RetAI.getCoerceAndExpandType();
2944 
2945  // Load all of the coerced elements out into results.
2946  SmallVector<llvm::Value*, 4> results;
2947  Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2948  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2949  auto coercedEltType = coercionType->getElementType(i);
2950  if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2951  continue;
2952 
2953  auto eltAddr = Builder.CreateStructGEP(addr, i);
2954  auto elt = Builder.CreateLoad(eltAddr);
2955  results.push_back(elt);
2956  }
2957 
2958  // If we have one result, it's the single direct result type.
2959  if (results.size() == 1) {
2960  RV = results[0];
2961 
2962  // Otherwise, we need to make a first-class aggregate.
2963  } else {
2964  // Construct a return type that lacks padding elements.
2965  llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2966 
2967  RV = llvm::UndefValue::get(returnType);
2968  for (unsigned i = 0, e = results.size(); i != e; ++i) {
2969  RV = Builder.CreateInsertValue(RV, results[i], i);
2970  }
2971  }
2972  break;
2973  }
2974 
2975  case ABIArgInfo::Expand:
2976  llvm_unreachable("Invalid ABI kind for return argument");
2977  }
2978 
2979  llvm::Instruction *Ret;
2980  if (RV) {
2981  EmitReturnValueCheck(RV);
2982  Ret = Builder.CreateRet(RV);
2983  } else {
2984  Ret = Builder.CreateRetVoid();
2985  }
2986 
2987  if (RetDbgLoc)
2988  Ret->setDebugLoc(std::move(RetDbgLoc));
2989 }
2990 
2991 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2992  // A current decl may not be available when emitting vtable thunks.
2993  if (!CurCodeDecl)
2994  return;
2995 
2996  ReturnsNonNullAttr *RetNNAttr = nullptr;
2997  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2998  RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2999 
3000  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3001  return;
3002 
3003  // Prefer the returns_nonnull attribute if it's present.
3004  SourceLocation AttrLoc;
3005  SanitizerMask CheckKind;
3006  SanitizerHandler Handler;
3007  if (RetNNAttr) {
3008  assert(!requiresReturnValueNullabilityCheck() &&
3009  "Cannot check nullability and the nonnull attribute");
3010  AttrLoc = RetNNAttr->getLocation();
3011  CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3012  Handler = SanitizerHandler::NonnullReturn;
3013  } else {
3014  if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3015  if (auto *TSI = DD->getTypeSourceInfo())
3016  if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3017  AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3018  CheckKind = SanitizerKind::NullabilityReturn;
3019  Handler = SanitizerHandler::NullabilityReturn;
3020  }
3021 
3022  SanitizerScope SanScope(this);
3023 
3024  // Make sure the "return" source location is valid. If we're checking a
3025  // nullability annotation, make sure the preconditions for the check are met.
3026  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3027  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3028  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3029  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3030  if (requiresReturnValueNullabilityCheck())
3031  CanNullCheck =
3032  Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3033  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3034  EmitBlock(Check);
3035 
3036  // Now do the null check.
3037  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3038  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3039  llvm::Value *DynamicData[] = {SLocPtr};
3040  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3041 
3042  EmitBlock(NoCheck);
3043 
3044 #ifndef NDEBUG
3045  // The return location should not be used after the check has been emitted.
3046  ReturnLocation = Address::invalid();
3047 #endif
3048 }
3049 
3050 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3051  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3052  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3053 }
3054 
3055 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3056  QualType Ty) {
3057  // FIXME: Generate IR in one pass, rather than going back and fixing up these
3058  // placeholders.
3059  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3060  llvm::Type *IRPtrTy = IRTy->getPointerTo();
3061  llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3062 
3063  // FIXME: When we generate this IR in one pass, we shouldn't need
3064  // this win32-specific alignment hack.
3065  CharUnits Align = CharUnits::fromQuantity(4);
3066  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3067 
3068  return AggValueSlot::forAddr(Address(Placeholder, Align),
3069  Ty.getQualifiers(),
3070  AggValueSlot::IsNotDestructed,
3071  AggValueSlot::DoesNotNeedGCBarriers,
3072  AggValueSlot::IsNotAliased,
3073  AggValueSlot::DoesNotOverlap);
3074 }
3075 
3076 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3077  const VarDecl *param,
3078  SourceLocation loc) {
3079  // StartFunction converted the ABI-lowered parameter(s) into a
3080  // local alloca. We need to turn that into an r-value suitable
3081  // for EmitCall.
3082  Address local = GetAddrOfLocalVar(param);
3083 
3084  QualType type = param->getType();
3085 
3086  if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3087  CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3088  }
3089 
3090  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3091  // but the argument needs to be the original pointer.
3092  if (type->isReferenceType()) {
3093  args.add(RValue::get(Builder.CreateLoad(local)), type);
3094 
3095  // In ARC, move out of consumed arguments so that the release cleanup
3096  // entered by StartFunction doesn't cause an over-release. This isn't
3097  // optimal -O0 code generation, but it should get cleaned up when
3098  // optimization is enabled. This also assumes that delegate calls are
3099  // performed exactly once for a set of arguments, but that should be safe.
3100  } else if (getLangOpts().ObjCAutoRefCount &&
3101  param->hasAttr<NSConsumedAttr>() &&
3102  type->isObjCRetainableType()) {
3103  llvm::Value *ptr = Builder.CreateLoad(local);
3104  auto null =
3105  llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3106  Builder.CreateStore(null, local);
3107  args.add(RValue::get(ptr), type);
3108 
3109  // For the most part, we just need to load the alloca, except that
3110  // aggregate r-values are actually pointers to temporaries.
3111  } else {
3112  args.add(convertTempToRValue(local, type, loc), type);
3113  }
3114 
3115  // Deactivate the cleanup for the callee-destructed param that was pushed.
3116  if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3117  type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3118  type.isDestructedType()) {
3119  EHScopeStack::stable_iterator cleanup =
3120  CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3121  assert(cleanup.isValid() &&
3122  "cleanup for callee-destructed param not recorded");
3123  // This unreachable is a temporary marker which will be removed later.
3124  llvm::Instruction *isActive = Builder.CreateUnreachable();
3125  args.addArgCleanupDeactivation(cleanup, isActive);
3126  }
3127 }
3128 
3129 static bool isProvablyNull(llvm::Value *addr) {
3130  return isa<llvm::ConstantPointerNull>(addr);
3131 }
3132 
3133 /// Emit the actual writing-back of a writeback.
3134 static void emitWriteback(CodeGenFunction &CGF,
3135  const CallArgList::Writeback &writeback) {
3136  const LValue &srcLV = writeback.Source;
3137  Address srcAddr = srcLV.getAddress();
3138  assert(!isProvablyNull(srcAddr.getPointer()) &&
3139  "shouldn't have writeback for provably null argument");
3140 
3141  llvm::BasicBlock *contBB = nullptr;
3142 
3143  // If the argument wasn't provably non-null, we need to null check
3144  // before doing the store.
3145  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3146  CGF.CGM.getDataLayout());
3147  if (!provablyNonNull) {
3148  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3149  contBB = CGF.createBasicBlock("icr.done");
3150 
3151  llvm::Value *isNull =
3152  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3153  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3154  CGF.EmitBlock(writebackBB);
3155  }
3156 
3157  // Load the value to writeback.
3158  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3159 
3160  // Cast it back, in case we're writing an id to a Foo* or something.
3161  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3162  "icr.writeback-cast");
3163 
3164  // Perform the writeback.
3165 
3166  // If we have a "to use" value, it's something we need to emit a use
3167  // of. This has to be carefully threaded in: if it's done after the
3168  // release it's potentially undefined behavior (and the optimizer
3169  // will ignore it), and if it happens before the retain then the
3170  // optimizer could move the release there.
3171  if (writeback.ToUse) {
3172  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3173 
3174  // Retain the new value. No need to block-copy here: the block's
3175  // being passed up the stack.
3176  value = CGF.EmitARCRetainNonBlock(value);
3177 
3178  // Emit the intrinsic use here.
3179  CGF.EmitARCIntrinsicUse(writeback.ToUse);
3180 
3181  // Load the old value (primitively).
3182  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3183 
3184  // Put the new value in place (primitively).
3185  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3186 
3187  // Release the old value.
3188  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3189 
3190  // Otherwise, we can just do a normal lvalue store.
3191  } else {
3192  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3193  }
3194 
3195  // Jump to the continuation block.
3196  if (!provablyNonNull)
3197  CGF.EmitBlock(contBB);
3198 }
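// [Editorial note, not in the original source] emitWriteback implements the
// store-back half of ObjC ARC's indirect copy-restore convention. As an
// illustrative (hypothetical) example:
//
//   NSError *err;            // __strong local
//   [obj fetch:&err];        // parameter type: NSError * __autoreleasing *
//
// The caller passes the address of an __autoreleasing temporary instead of
// &err; after the call, the code above loads the temporary and stores it back
// into 'err' (retaining the new value first when a use marker is required),
// guarding the store with a null check unless the source address is provably
// non-null.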
3199 
3200 static void emitWritebacks(CodeGenFunction &CGF,
3201  const CallArgList &args) {
3202  for (const auto &I : args.writebacks())
3203  emitWriteback(CGF, I);
3204 }
3205 
3206 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3207  const CallArgList &CallArgs) {
3208  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3209  CallArgs.getCleanupsToDeactivate();
3210  // Iterate in reverse to increase the likelihood of popping the cleanup.
3211  for (const auto &I : llvm::reverse(Cleanups)) {
3212  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3213  I.IsActiveIP->eraseFromParent();
3214  }
3215 }
3216 
3217 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3218  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3219  if (uop->getOpcode() == UO_AddrOf)
3220  return uop->getSubExpr();
3221  return nullptr;
3222 }
3223 
3224 /// Emit an argument that's being passed call-by-writeback. That is,
3225 /// we are passing the address of an __autoreleased temporary; it
3226 /// might be copy-initialized with the current value of the given
3227 /// address, but it will definitely be copied out of after the call.
3228 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3229  const ObjCIndirectCopyRestoreExpr *CRE) {
3230  LValue srcLV;
3231 
3232  // Make an optimistic effort to emit the address as an l-value.
3233  // This can fail if the argument expression is more complicated.
3234  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3235  srcLV = CGF.EmitLValue(lvExpr);
3236 
3237  // Otherwise, just emit it as a scalar.
3238  } else {
3239  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3240 
3241  QualType srcAddrType =
3242  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3243  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3244  }
3245  Address srcAddr = srcLV.getAddress();
3246 
3247  // The dest and src types don't necessarily match in LLVM terms
3248  // because of the crazy ObjC compatibility rules.
3249 
3250  llvm::PointerType *destType =
3251  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3252 
3253  // If the address is a constant null, just pass the appropriate null.
3254  if (isProvablyNull(srcAddr.getPointer())) {
3255  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3256  CRE->getType());
3257  return;
3258  }
3259 
3260  // Create the temporary.
3261  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3262  CGF.getPointerAlign(),
3263  "icr.temp");
3264  // Loading an l-value can introduce a cleanup if the l-value is __weak,
3265  // and that cleanup will be conditional if we can't prove that the l-value
3266  // isn't null, so we need to register a dominating point so that the cleanups
3267  // system will make valid IR.
3268  CodeGenFunction::ConditionalEvaluation condEval(CGF);
3269 
3270  // Zero-initialize it if we're not doing a copy-initialization.
3271  bool shouldCopy = CRE->shouldCopy();
3272  if (!shouldCopy) {
3273  llvm::Value *null =
3274  llvm::ConstantPointerNull::get(
3275  cast<llvm::PointerType>(destType->getElementType()));
3276  CGF.Builder.CreateStore(null, temp);
3277  }
3278 
3279  llvm::BasicBlock *contBB = nullptr;
3280  llvm::BasicBlock *originBB = nullptr;
3281 
3282  // If the address is *not* known to be non-null, we need to switch.
3283  llvm::Value *finalArgument;
3284 
3285  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3286  CGF.CGM.getDataLayout());
3287  if (provablyNonNull) {
3288  finalArgument = temp.getPointer();
3289  } else {
3290  llvm::Value *isNull =
3291  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3292 
3293  finalArgument = CGF.Builder.CreateSelect(isNull,
3294  llvm::ConstantPointerNull::get(destType),
3295  temp.getPointer(), "icr.argument");
3296 
3297  // If we need to copy, then the load has to be conditional, which
3298  // means we need control flow.
3299  if (shouldCopy) {
3300  originBB = CGF.Builder.GetInsertBlock();
3301  contBB = CGF.createBasicBlock("icr.cont");
3302  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3303  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3304  CGF.EmitBlock(copyBB);
3305  condEval.begin(CGF);
3306  }
3307  }
3308 
3309  llvm::Value *valueToUse = nullptr;
3310 
3311  // Perform a copy if necessary.
3312  if (shouldCopy) {
3313  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3314  assert(srcRV.isScalar());
3315 
3316  llvm::Value *src = srcRV.getScalarVal();
3317  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3318  "icr.cast");
3319 
3320  // Use an ordinary store, not a store-to-lvalue.
3321  CGF.Builder.CreateStore(src, temp);
3322 
3323  // If optimization is enabled, and the value was held in a
3324  // __strong variable, we need to tell the optimizer that this
3325  // value has to stay alive until we're doing the store back.
3326  // This is because the temporary is effectively unretained,
3327  // and so otherwise we can violate the high-level semantics.
3328  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3329  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3330  valueToUse = src;
3331  }
3332  }
3333 
3334  // Finish the control flow if we needed it.
3335  if (shouldCopy && !provablyNonNull) {
3336  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3337  CGF.EmitBlock(contBB);
3338 
3339  // Make a phi for the value to intrinsically use.
3340  if (valueToUse) {
3341  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3342  "icr.to-use");
3343  phiToUse->addIncoming(valueToUse, copyBB);
3344  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3345  originBB);
3346  valueToUse = phiToUse;
3347  }
3348 
3349  condEval.end(CGF);
3350  }
3351 
3352  args.addWriteback(srcLV, temp, valueToUse);
3353  args.add(RValue::get(finalArgument), CRE->getType());
3354 }
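// [Editorial sketch, simplified and assumed] for a possibly-null source
// address, emitWritebackArg produces IR of roughly this shape (block and
// value names taken from the code above):
//
//   %isnull = icmp eq i8** %src, null                        ; "icr.isnull"
//   %arg = select i1 %isnull, i8** null, i8** %icr.temp      ; "icr.argument"
//   br i1 %isnull, label %icr.cont, label %icr.copy
// icr.copy:                                ; copy-in runs only when non-null
//   ; load the current value of *%src and store it into %icr.temp
//   br label %icr.cont
// icr.cont:
//   ; call with %arg; emitWriteback later stores %icr.temp back through %src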
3355 
3356 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3357  assert(!StackBase);
3358 
3359  // Save the stack.
3360  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3361  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3362 }
3363 
3364 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3365  if (StackBase) {
3366  // Restore the stack after the call.
3367  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3368  CGF.Builder.CreateCall(F, StackBase);
3369  }
3370 }
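// [Editorial sketch, assumed and simplified] the save/restore pair above
// brackets inalloca argument areas so the packaging space is reclaimed as
// soon as the call returns:
//
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>, align 4
//   call void @f(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)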
3371 
3372 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3373  SourceLocation ArgLoc,
3374  AbstractCallee AC,
3375  unsigned ParmNum) {
3376  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3377  SanOpts.has(SanitizerKind::NullabilityArg)))
3378  return;
3379 
3380  // The param decl may be missing in a variadic function.
3381  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3382  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3383 
3384  // Prefer the nonnull attribute if it's present.
3385  const NonNullAttr *NNAttr = nullptr;
3386  if (SanOpts.has(SanitizerKind::NonnullAttribute))
3387  NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3388 
3389  bool CanCheckNullability = false;
3390  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3391  auto Nullability = PVD->getType()->getNullability(getContext());
3392  CanCheckNullability = Nullability &&
3393  *Nullability == NullabilityKind::NonNull &&
3394  PVD->getTypeSourceInfo();
3395  }
3396 
3397  if (!NNAttr && !CanCheckNullability)
3398  return;
3399 
3400  SourceLocation AttrLoc;
3401  SanitizerMask CheckKind;
3402  SanitizerHandler Handler;
3403  if (NNAttr) {
3404  AttrLoc = NNAttr->getLocation();
3405  CheckKind = SanitizerKind::NonnullAttribute;
3406  Handler = SanitizerHandler::NonnullArg;
3407  } else {
3408  AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3409  CheckKind = SanitizerKind::NullabilityArg;
3410  Handler = SanitizerHandler::NullabilityArg;
3411  }
3412 
3413  SanitizerScope SanScope(this);
3414  assert(RV.isScalar());
3415  llvm::Value *V = RV.getScalarVal();
3416  llvm::Value *Cond =
3417  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3418  llvm::Constant *StaticData[] = {
3419  EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3420  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3421  };
3422  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3423 }
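// [Editorial example, hypothetical] given a declaration such as
//
//   void take(int *p) __attribute__((nonnull(1)));
//
// compiling a call 'take(q)' with -fsanitize=nonnull-attribute makes the code
// above emit 'icmp ne %q, null' and, on the false edge, a call to the
// __ubsan_handle_nonnull_arg runtime handler; -fsanitize=nullability-arg does
// the same for parameters declared '_Nonnull'.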
3424 
3425 void CodeGenFunction::EmitCallArgs(
3426  CallArgList &Args, ArrayRef<QualType> ArgTypes,
3427  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3428  AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3429  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3430 
3431  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3432  // because arguments are destroyed left to right in the callee. As a special
3433  // case, there are certain language constructs that require left-to-right
3434  // evaluation, and in those cases we consider the evaluation order requirement
3435  // to trump the "destruction order is reverse construction order" guarantee.
3436  bool LeftToRight =
3437  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3438  ? Order == EvaluationOrder::ForceLeftToRight
3439  : Order != EvaluationOrder::ForceRightToLeft;
3440 
3441  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3442  RValue EmittedArg) {
3443  if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3444  return;
3445  auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3446  if (PS == nullptr)
3447  return;
3448 
3449  const auto &Context = getContext();
3450  auto SizeTy = Context.getSizeType();
3451  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3452  assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3453  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3454  EmittedArg.getScalarVal(),
3455  PS->isDynamic());
3456  Args.add(RValue::get(V), SizeTy);
3457  // If we're emitting args in reverse, be sure to do so with
3458  // pass_object_size, as well.
3459  if (!LeftToRight)
3460  std::swap(Args.back(), *(&Args.back() - 1));
3461  };
3462 
3463  // Insert a stack save if we're going to need any inalloca args.
3464  bool HasInAllocaArgs = false;
3465  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3466  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3467  I != E && !HasInAllocaArgs; ++I)
3468  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3469  if (HasInAllocaArgs) {
3470  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3471  Args.allocateArgumentMemory(*this);
3472  }
3473  }
3474 
3475  // Evaluate each argument in the appropriate order.
3476  size_t CallArgsStart = Args.size();
3477  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3478  unsigned Idx = LeftToRight ? I : E - I - 1;
3479  CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3480  unsigned InitialArgSize = Args.size();
3481  // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3482  // the argument and parameter match or the objc method is parameterized.
3483  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3484  getContext().hasSameUnqualifiedType((*Arg)->getType(),
3485  ArgTypes[Idx]) ||
3486  (isa<ObjCMethodDecl>(AC.getDecl()) &&
3487  isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3488  "Argument and parameter types don't match");
3489  EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3490  // In particular, we depend on it being the last arg in Args, and the
3491  // objectsize bits depend on there only being one arg if !LeftToRight.
3492  assert(InitialArgSize + 1 == Args.size() &&
3493  "The code below depends on only adding one arg per EmitCallArg");
3494  (void)InitialArgSize;
3495  // Since pointer arguments are never emitted as LValues, it is safe to emit
3496  // the non-null argument check for r-values only.
3497  if (!Args.back().hasLValue()) {
3498  RValue RVArg = Args.back().getKnownRValue();
3499  EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3500  ParamsToSkip + Idx);
3501  // @llvm.objectsize should never have side-effects and shouldn't need
3502  // destruction/cleanups, so we can safely "emit" it after its arg,
3503  // regardless of right-to-leftness
3504  MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3505  }
3506  }
3507 
3508  if (!LeftToRight) {
3509  // Un-reverse the arguments we just evaluated so they match up with the LLVM
3510  // IR function.
3511  std::reverse(Args.begin() + CallArgsStart, Args.end());
3512  }
3513 }
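// [Editorial example, assumed] for 'f(g(), h())' under the MS C++ ABI, the
// loop above evaluates h() before g() (right-to-left), matching the callee's
// left-to-right destruction order; constructs that mandate left-to-right
// evaluation (e.g. a braced init list) arrive with
// EvaluationOrder::ForceLeftToRight instead, and the evaluated arguments are
// un-reversed at the end so they line up with the IR signature either way.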
3514 
3515 namespace {
3516 
3517 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3518  DestroyUnpassedArg(Address Addr, QualType Ty)
3519  : Addr(Addr), Ty(Ty) {}
3520 
3521  Address Addr;
3522  QualType Ty;
3523 
3524  void Emit(CodeGenFunction &CGF, Flags flags) override {
3525  QualType::DestructionKind DtorKind = Ty.isDestructedType();
3526  if (DtorKind == QualType::DK_cxx_destructor) {
3527  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3528  assert(!Dtor->isTrivial());
3529  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3530  /*Delegating=*/false, Addr);
3531  } else {
3532  CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3533  }
3534  }
3535 };
3536 
3537 struct DisableDebugLocationUpdates {
3538  CodeGenFunction &CGF;
3539  bool disabledDebugInfo;
3540  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3541  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3542  CGF.disableDebugInfo();
3543  }
3544  ~DisableDebugLocationUpdates() {
3545  if (disabledDebugInfo)
3546  CGF.enableDebugInfo();
3547  }
3548 };
3549 
3550 } // end anonymous namespace
3551 
3552 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3553  if (!HasLV)
3554  return RV;
3555  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3556  CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3557  LV.isVolatile());
3558  IsUsed = true;
3559  return RValue::getAggregate(Copy.getAddress());
3560 }
3561 
3562 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3563  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3564  if (!HasLV && RV.isScalar())
3565  CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3566  else if (!HasLV && RV.isComplex())
3567  CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3568  else {
3569  auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3570  LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3571  // We assume that call args are never copied into subobjects.
3572  CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3573  HasLV ? LV.isVolatileQualified()
3574  : RV.isVolatileQualified());
3575  }
3576  IsUsed = true;
3577 }
3578 
3579 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3580  QualType type) {
3581  DisableDebugLocationUpdates Dis(*this, E);
3582  if (const ObjCIndirectCopyRestoreExpr *CRE
3583  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3584  assert(getLangOpts().ObjCAutoRefCount);
3585  return emitWritebackArg(*this, args, CRE);
3586  }
3587 
3588  assert(type->isReferenceType() == E->isGLValue() &&
3589  "reference binding to unmaterialized r-value!");
3590 
3591  if (E->isGLValue()) {
3592  assert(E->getObjectKind() == OK_Ordinary);
3593  return args.add(EmitReferenceBindingToExpr(E), type);
3594  }
3595 
3596  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3597 
3598  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3599  // However, we still have to push an EH-only cleanup in case we unwind before
3600  // we make it to the call.
3601  if (HasAggregateEvalKind &&
3602  type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3603  // If we're using inalloca, use the argument memory. Otherwise, use a
3604  // temporary.
3605  AggValueSlot Slot;
3606  if (args.isUsingInAlloca())
3607  Slot = createPlaceholderSlot(*this, type);
3608  else
3609  Slot = CreateAggTemp(type, "agg.tmp");
3610 
3611  bool DestroyedInCallee = true, NeedsEHCleanup = true;
3612  if (const auto *RD = type->getAsCXXRecordDecl())
3613  DestroyedInCallee = RD->hasNonTrivialDestructor();
3614  else
3615  NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3616 
3617  if (DestroyedInCallee)
3618  Slot.setExternallyDestructed();
3619 
3620  EmitAggExpr(E, Slot);
3621  RValue RV = Slot.asRValue();
3622  args.add(RV, type);
3623 
3624  if (DestroyedInCallee && NeedsEHCleanup) {
3625  // Create a no-op GEP between the placeholder and the cleanup so we can
3626  // RAUW it successfully. It also serves as a marker of the first
3627  // instruction where the cleanup is active.
3628  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3629  type);
3630  // This unreachable is a temporary marker which will be removed later.
3631  llvm::Instruction *IsActive = Builder.CreateUnreachable();
3632  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3633  }
3634  return;
3635  }
3636 
3637  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3638  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3639  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3640  assert(L.isSimple());
3641  args.addUncopiedAggregate(L, type);
3642  return;
3643  }
3644 
3645  args.add(EmitAnyExprToTemp(E), type);
3646 }
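// [Editorial example, assumed] the callee-destruction branch above applies to
// code like
//
//   struct S { ~S(); };  void f(S);  f(S{});
//
// under the MS C++ ABI: f itself destroys its copy of S, so the caller only
// pushes an EH-only DestroyUnpassedArg cleanup that fires if unwinding occurs
// after the argument is built but before the call is entered, and that is
// deactivated at the call site otherwise.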
3647 
3648 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3649  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3650  // implicitly widens null pointer constants that are arguments to varargs
3651  // functions to pointer-sized ints.
3652  if (!getTarget().getTriple().isOSWindows())
3653  return Arg->getType();
3654 
3655  if (Arg->getType()->isIntegerType() &&
3656  getContext().getTypeSize(Arg->getType()) <
3657  getContext().getTargetInfo().getPointerWidth(0) &&
3658  Arg->isNullPointerConstant(getContext(),
3659  Expr::NPC_ValueDependentIsNull)) {
3660  return getContext().getIntPtrType();
3661  }
3662 
3663  return Arg->getType();
3664 }
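// [Editorial example, illustrative] on Win64, NULL expands to plain 0 (a
// 32-bit int), so without this hook
//
//   printf("%p", NULL);
//
// would push only four bytes into the varargs area; getVarArgType widens such
// null pointer constants to the pointer-sized intptr_t that MSVC expects.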
3665 
3666 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3667 // optimizer it can aggressively ignore unwind edges.
3668 void
3669 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3670  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3671  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3672  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3673  CGM.getNoObjCARCExceptionsMetadata());
3674 }
3675 
3676 /// Emits a call to the given no-arguments nounwind runtime function.
3677 llvm::CallInst *
3678 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3679  const llvm::Twine &name) {
3680  return EmitNounwindRuntimeCall(callee, None, name);
3681 }
3682 
3683 /// Emits a call to the given nounwind runtime function.
3684 llvm::CallInst *
3685 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3686  ArrayRef<llvm::Value *> args,
3687  const llvm::Twine &name) {
3688  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3689  call->setDoesNotThrow();
3690  return call;
3691 }
3692 
3693 /// Emits a simple call (never an invoke) to the given no-arguments
3694 /// runtime function.
3695 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3696  const llvm::Twine &name) {
3697  return EmitRuntimeCall(callee, None, name);
3698 }
3699 
3700 // Calls which may throw must have operand bundles indicating which funclet
3701 // they are nested within.
3702 SmallVector<llvm::OperandBundleDef, 1>
3703 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3704  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3705  // There is no need for a funclet operand bundle if we aren't inside a
3706  // funclet.
3707  if (!CurrentFuncletPad)
3708  return BundleList;
3709 
3710  // Skip intrinsics which cannot throw.
3711  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3712  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3713  return BundleList;
3714 
3715  BundleList.emplace_back("funclet", CurrentFuncletPad);
3716  return BundleList;
3717 }
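// [Editorial sketch, assumed] inside an MSVC-style cleanup funclet, a call
// built with this bundle list looks like
//
//   %pad = cleanuppad within none []
//   call void @g() [ "funclet"(token %pad) ]
//
// which records for WinEHPrepare which funclet the call belongs to.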
3718 
3719 /// Emits a simple call (never an invoke) to the given runtime function.
3720 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3721  ArrayRef<llvm::Value *> args,
3722  const llvm::Twine &name) {
3723  llvm::CallInst *call = Builder.CreateCall(
3724  callee, args, getBundlesForFunclet(callee.getCallee()), name);
3725  call->setCallingConv(getRuntimeCC());
3726  return call;
3727 }
3728 
3729 /// Emits a call or invoke to the given noreturn runtime function.
3730 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
3731  llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
3732  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3733  getBundlesForFunclet(callee.getCallee());
3734 
3735  if (getInvokeDest()) {
3736  llvm::InvokeInst *invoke =
3737  Builder.CreateInvoke(callee,
3738  getUnreachableBlock(),
3739  getInvokeDest(),
3740  args,
3741  BundleList);
3742  invoke->setDoesNotReturn();
3743  invoke->setCallingConv(getRuntimeCC());
3744  } else {
3745  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3746  call->setDoesNotReturn();
3747  call->setCallingConv(getRuntimeCC());
3748  Builder.CreateUnreachable();
3749  }
3750 }
3751 
3752 /// Emits a call or invoke instruction to the given nullary runtime function.
3753 llvm::CallBase *
3754 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3755  const Twine &name) {
3756  return EmitRuntimeCallOrInvoke(callee, None, name);
3757 }
3758 
3759 /// Emits a call or invoke instruction to the given runtime function.
3760 llvm::CallBase *
3761 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3762  ArrayRef<llvm::Value *> args,
3763  const Twine &name) {
3764  llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
3765  call->setCallingConv(getRuntimeCC());
3766  return call;
3767 }
3768 
3769 /// Emits a call or invoke instruction to the given function, depending
3770 /// on the current state of the EH stack.
3771 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
3772  ArrayRef<llvm::Value *> Args,
3773  const Twine &Name) {
3774  llvm::BasicBlock *InvokeDest = getInvokeDest();
3775  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3776  getBundlesForFunclet(Callee.getCallee());
3777 
3778  llvm::CallBase *Inst;
3779  if (!InvokeDest)
3780  Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3781  else {
3782  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3783  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3784  Name);
3785  EmitBlock(ContBB);
3786  }
3787 
3788  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3789  // optimizer it can aggressively ignore unwind edges.
3790  if (CGM.getLangOpts().ObjCAutoRefCount)
3791  AddObjCARCExceptionMetadata(Inst);
3792 
3793  return Inst;
3794 }
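// [Editorial sketch, assumed] with an active EH scope, EmitCallOrInvoke emits
//
//   invoke void @f() to label %invoke.cont unwind label %lpad
// invoke.cont:
//   ...
//
// and a plain 'call void @f()' when getInvokeDest() returns null.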
3795 
3796 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3797  llvm::Value *New) {
3798  DeferredReplacements.push_back(std::make_pair(Old, New));
3799 }
3800 
3801 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3802  const CGCallee &Callee,
3803  ReturnValueSlot ReturnValue,
3804  const CallArgList &CallArgs,
3805  llvm::CallBase **callOrInvoke,
3806  SourceLocation Loc) {
3807  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3808 
3809  assert(Callee.isOrdinary() || Callee.isVirtual());
3810 
3811  // Handle struct-return functions by passing a pointer to the
3812  // location that we would like to return into.
3813  QualType RetTy = CallInfo.getReturnType();
3814  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3815 
3816  llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
3817 
3818 #ifndef NDEBUG
3819  if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
3820  // For an inalloca varargs function, we don't expect CallInfo to match the
3821  // function pointer's type, because the inalloca struct will have extra
3822  // fields in it for the varargs parameters. Code later in this function
3823  // bitcasts the function pointer to the type derived from CallInfo.
3824  //
3825  // In other cases, we assert that the types match up (until pointers stop
3826  // having pointee types).
3827  llvm::Type *TypeFromVal;
3828  if (Callee.isVirtual())
3829  TypeFromVal = Callee.getVirtualFunctionType();
3830  else
3831  TypeFromVal =
3832  Callee.getFunctionPointer()->getType()->getPointerElementType();
3833  assert(IRFuncTy == TypeFromVal);
3834  }
3835 #endif
3836 
3837  // 1. Set up the arguments.
3838 
3839  // If we're using inalloca, insert the allocation after the stack save.
3840  // FIXME: Do this earlier rather than hacking it in here!
3841  Address ArgMemory = Address::invalid();
3842  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3843  const llvm::DataLayout &DL = CGM.getDataLayout();
3844  llvm::Instruction *IP = CallArgs.getStackBase();
3845  llvm::AllocaInst *AI;
3846  if (IP) {
3847  IP = IP->getNextNode();
3848  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3849  "argmem", IP);
3850  } else {
3851  AI = CreateTempAlloca(ArgStruct, "argmem");
3852  }
3853  auto Align = CallInfo.getArgStructAlignment();
3854  AI->setAlignment(Align.getQuantity());
3855  AI->setUsedWithInAlloca(true);
3856  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3857  ArgMemory = Address(AI, Align);
3858  }
3859 
3860  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3861  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3862 
3863  // If the call returns a temporary with struct return, create a temporary
3864  // alloca to hold the result, unless one is given to us.
3865  Address SRetPtr = Address::invalid();
3866  Address SRetAlloca = Address::invalid();
3867  llvm::Value *UnusedReturnSizePtr = nullptr;
3868  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3869  if (!ReturnValue.isNull()) {
3870  SRetPtr = ReturnValue.getValue();
3871  } else {
3872  SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3873  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3874  uint64_t size =
3875  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3876  UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3877  }
3878  }
3879  if (IRFunctionArgs.hasSRetArg()) {
3880  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3881  } else if (RetAI.isInAlloca()) {
3882  Address Addr =
3883  Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
3884  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3885  }
3886  }
3887 
3888  Address swiftErrorTemp = Address::invalid();
3889  Address swiftErrorArg = Address::invalid();
3890 
3891  // Translate all of the arguments as necessary to match the IR lowering.
3892  assert(CallInfo.arg_size() == CallArgs.size() &&
3893  "Mismatch between function signature & arguments.");
3894  unsigned ArgNo = 0;
3895  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3896  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3897  I != E; ++I, ++info_it, ++ArgNo) {
3898  const ABIArgInfo &ArgInfo = info_it->info;
3899 
3900  // Insert a padding argument to ensure proper alignment.
3901  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3902  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3903  llvm::UndefValue::get(ArgInfo.getPaddingType());
3904 
3905  unsigned FirstIRArg, NumIRArgs;
3906  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3907 
3908  switch (ArgInfo.getKind()) {
3909  case ABIArgInfo::InAlloca: {
3910  assert(NumIRArgs == 0);
3911  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3912  if (I->isAggregate()) {
3913  // Replace the placeholder with the appropriate argument slot GEP.
3914  Address Addr = I->hasLValue()
3915  ? I->getKnownLValue().getAddress()
3916  : I->getKnownRValue().getAggregateAddress();
3917  llvm::Instruction *Placeholder =
3918  cast<llvm::Instruction>(Addr.getPointer());
3919  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3920  Builder.SetInsertPoint(Placeholder);
3921  Addr =
3922  Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3923  Builder.restoreIP(IP);
3924  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3925  } else {
3926  // Store the RValue into the argument struct.
3927  Address Addr =
3928  Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3929  unsigned AS = Addr.getType()->getPointerAddressSpace();
3930  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3931  // There are some cases where a trivial bitcast is not avoidable. The
3932  // definition of a type later in a translation unit may change its type
3933  // from {}* to (%struct.foo*)*.
3934  if (Addr.getType() != MemType)
3935  Addr = Builder.CreateBitCast(Addr, MemType);
3936  I->copyInto(*this, Addr);
3937  }
3938  break;
3939  }
3940 
3941  case ABIArgInfo::Indirect: {
3942  assert(NumIRArgs == 1);
3943  if (!I->isAggregate()) {
3944  // Make a temporary alloca to pass the argument.
3945  Address Addr = CreateMemTempWithoutCast(
3946  I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3947  IRCallArgs[FirstIRArg] = Addr.getPointer();
3948 
3949  I->copyInto(*this, Addr);
3950  } else {
3951  // We want to avoid creating an unnecessary temporary+copy here;
3952  // however, we need one in three cases:
3953  // 1. If the argument is not byval, and we are required to copy the
3954  // source. (This case doesn't occur on any common architecture.)
3955  // 2. If the argument is byval, RV is not sufficiently aligned, and
3956  // we cannot force it to be sufficiently aligned.
3957  // 3. If the argument is byval, but RV is not located in default
3958  // or alloca address space.
3959  Address Addr = I->hasLValue()
3960  ? I->getKnownLValue().getAddress()
3961  : I->getKnownRValue().getAggregateAddress();
3962  llvm::Value *V = Addr.getPointer();
3963  CharUnits Align = ArgInfo.getIndirectAlign();
3964  const llvm::DataLayout *TD = &CGM.getDataLayout();
3965 
3966  assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3967  IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3968  TD->getAllocaAddrSpace()) &&
3969  "indirect argument must be in alloca address space");
3970 
3971  bool NeedCopy = false;
3972 
3973  if (Addr.getAlignment() < Align &&
3974  llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3975  Align.getQuantity()) {
3976  NeedCopy = true;
3977  } else if (I->hasLValue()) {
3978  auto LV = I->getKnownLValue();
3979  auto AS = LV.getAddressSpace();
3980 
3981  if ((!ArgInfo.getIndirectByVal() &&
3982  (LV.getAlignment() >=
3983  getContext().getTypeAlignInChars(I->Ty)))) {
3984  NeedCopy = true;
3985  }
3986  if (!getLangOpts().OpenCL) {
3987  if ((ArgInfo.getIndirectByVal() &&
3988  (AS != LangAS::Default &&
3989  AS != CGM.getASTAllocaAddressSpace()))) {
3990  NeedCopy = true;
3991  }
3992  }
3993  // For OpenCL, even if RV is located in the default or alloca address
3994  // space, we don't want to perform an address space cast for it.
3995  else if ((ArgInfo.getIndirectByVal() &&
3996  Addr.getType()->getAddressSpace() != IRFuncTy->
3997  getParamType(FirstIRArg)->getPointerAddressSpace())) {
3998  NeedCopy = true;
3999  }
4000  }
4001 
4002  if (NeedCopy) {
4003  // Create an aligned temporary, and copy to it.
4004  Address AI = CreateMemTempWithoutCast(
4005  I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4006  IRCallArgs[FirstIRArg] = AI.getPointer();
4007  I->copyInto(*this, AI);
4008  } else {
4009  // Skip the extra memcpy call.
4010  auto *T = V->getType()->getPointerElementType()->getPointerTo(
4011  CGM.getDataLayout().getAllocaAddrSpace());
4012  IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4013  *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4014  true);
4015  }
4016  }
4017  break;
4018  }
4019 
4020  case ABIArgInfo::Ignore:
4021  assert(NumIRArgs == 0);
4022  break;
4023 
4024  case ABIArgInfo::Extend:
4025  case ABIArgInfo::Direct: {
4026  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4027  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4028  ArgInfo.getDirectOffset() == 0) {
4029  assert(NumIRArgs == 1);
4030  llvm::Value *V;
4031  if (!I->isAggregate())
4032  V = I->getKnownRValue().getScalarVal();
4033  else
4034  V = Builder.CreateLoad(
4035  I->hasLValue() ? I->getKnownLValue().getAddress()
4036  : I->getKnownRValue().getAggregateAddress());
4037 
4038  // Implement swifterror by copying into a new swifterror argument.
4039  // We'll write back in the normal path out of the call.
4040  if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4041  == ParameterABI::SwiftErrorResult) {
4042  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4043 
4044  QualType pointeeTy = I->Ty->getPointeeType();
4045  swiftErrorArg =
4046  Address(V, getContext().getTypeAlignInChars(pointeeTy));
4047 
4048  swiftErrorTemp =
4049  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4050  V = swiftErrorTemp.getPointer();
4051  cast<llvm::AllocaInst>(V)->setSwiftError(true);
4052 
4053  llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4054  Builder.CreateStore(errorValue, swiftErrorTemp);
4055  }
4056 
4057  // We might have to widen integers, but we should never truncate.
4058  if (ArgInfo.getCoerceToType() != V->getType() &&
4059  V->getType()->isIntegerTy())
4060  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4061 
4062  // If the argument doesn't match, perform a bitcast to coerce it. This
4063  // can happen due to trivial type mismatches.
4064  if (FirstIRArg < IRFuncTy->getNumParams() &&
4065  V->getType() != IRFuncTy->getParamType(FirstIRArg))
4066  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4067 
4068  IRCallArgs[FirstIRArg] = V;
4069  break;
4070  }
4071 
4072  // FIXME: Avoid the conversion through memory if possible.
4073  Address Src = Address::invalid();
4074  if (!I->isAggregate()) {
4075  Src = CreateMemTemp(I->Ty, "coerce");
4076  I->copyInto(*this, Src);
4077  } else {
4078  Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4079  : I->getKnownRValue().getAggregateAddress();
4080  }
4081 
4082  // If the value is offset in memory, apply the offset now.
4083  Src = emitAddressAtOffset(*this, Src, ArgInfo);
4084 
4085  // Fast-isel and the optimizer generally like scalar values better than
4086  // FCAs, so we flatten them if this is safe to do for this argument.
4087  llvm::StructType *STy =
4088  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4089  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4090  llvm::Type *SrcTy = Src.getType()->getElementType();
4091  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4092  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4093 
4094  // If the source type is smaller than the destination type of the
4095  // coerce-to logic, copy the source value into a temp alloca the size
4096  // of the destination type to allow loading all of it. The bits past
4097  // the source value are left undef.
4098  if (SrcSize < DstSize) {
4099  Address TempAlloca
4100  = CreateTempAlloca(STy, Src.getAlignment(),
4101  Src.getName() + ".coerce");
4102  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4103  Src = TempAlloca;
4104  } else {
4105  Src = Builder.CreateBitCast(Src,
4106  STy->getPointerTo(Src.getAddressSpace()));
4107  }
4108 
4109  assert(NumIRArgs == STy->getNumElements());
4110  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4111  Address EltPtr = Builder.CreateStructGEP(Src, i);
4112  llvm::Value *LI = Builder.CreateLoad(EltPtr);
4113  IRCallArgs[FirstIRArg + i] = LI;
4114  }
4115  } else {
4116  // In the simple case, just pass the coerced loaded value.
4117  assert(NumIRArgs == 1);
4118  IRCallArgs[FirstIRArg] =
4119  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4120  }
4121 
4122  break;
4123  }
4124 
4125  case ABIArgInfo::CoerceAndExpand: {
4126  auto coercionType = ArgInfo.getCoerceAndExpandType();
4127  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4128 
4129  llvm::Value *tempSize = nullptr;
4130  Address addr = Address::invalid();
4131  Address AllocaAddr = Address::invalid();
4132  if (I->isAggregate()) {
4133  addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4134  : I->getKnownRValue().getAggregateAddress();
4135 
4136  } else {
4137  RValue RV = I->getKnownRValue();
4138  assert(RV.isScalar()); // complex should always just be direct
4139 
4140  llvm::Type *scalarType = RV.getScalarVal()->getType();
4141  auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4142  auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4143 
4144  // Materialize to a temporary.
4145  addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4146  CharUnits::fromQuantity(std::max(
4147  layout->getAlignment(), scalarAlign)),
4148  "tmp",
4149  /*ArraySize=*/nullptr, &AllocaAddr);
4150  tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4151 
4152  Builder.CreateStore(RV.getScalarVal(), addr);
4153  }
4154 
4155  addr = Builder.CreateElementBitCast(addr, coercionType);
4156 
4157  unsigned IRArgPos = FirstIRArg;
4158  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4159  llvm::Type *eltType = coercionType->getElementType(i);
4160  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4161  Address eltAddr = Builder.CreateStructGEP(addr, i);
4162  llvm::Value *elt = Builder.CreateLoad(eltAddr);
4163  IRCallArgs[IRArgPos++] = elt;
4164  }
4165  assert(IRArgPos == FirstIRArg + NumIRArgs);
4166 
4167  if (tempSize) {
4168  EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4169  }
4170 
4171  break;
4172  }
4173 
4174  case ABIArgInfo::Expand:
4175  unsigned IRArgPos = FirstIRArg;
4176  ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4177  assert(IRArgPos == FirstIRArg + NumIRArgs);
4178  break;
4179  }
4180  }
4181 
4182  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4183  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4184 
4185  // If we're using inalloca, set up that argument.
4186  if (ArgMemory.isValid()) {
4187  llvm::Value *Arg = ArgMemory.getPointer();
4188  if (CallInfo.isVariadic()) {
4189  // When passing non-POD arguments by value to variadic functions, we will
4190  // end up with a variadic prototype and an inalloca call site. In such
4191  // cases, we can't do any parameter mismatch checks. Give up and bitcast
4192  // the callee.
4193  unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4194  CalleePtr =
4195  Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4196  } else {
4197  llvm::Type *LastParamTy =
4198  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4199  if (Arg->getType() != LastParamTy) {
4200 #ifndef NDEBUG
4201  // Assert that these structs have equivalent element types.
4202  llvm::StructType *FullTy = CallInfo.getArgStruct();
4203  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4204  cast<llvm::PointerType>(LastParamTy)->getElementType());
4205  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4206  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4207  DE = DeclaredTy->element_end(),
4208  FI = FullTy->element_begin();
4209  DI != DE; ++DI, ++FI)
4210  assert(*DI == *FI);
4211 #endif
4212  Arg = Builder.CreateBitCast(Arg, LastParamTy);
4213  }
4214  }
4215  assert(IRFunctionArgs.hasInallocaArg());
4216  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4217  }
4218 
4219  // 2. Prepare the function pointer.
4220 
4221  // If the callee is a bitcast of a non-variadic function to have a
4222  // variadic function pointer type, check to see if we can remove the
4223  // bitcast. This comes up with unprototyped functions.
4224  //
4225  // This makes the IR nicer, but more importantly it ensures that we
4226  // can inline the function at -O0 if it is marked always_inline.
4227  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4228  llvm::Value *Ptr) -> llvm::Function * {
4229  if (!CalleeFT->isVarArg())
4230  return nullptr;
4231 
4232  // Get underlying value if it's a bitcast
4233  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4234  if (CE->getOpcode() == llvm::Instruction::BitCast)
4235  Ptr = CE->getOperand(0);
4236  }
4237 
4238  llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4239  if (!OrigFn)
4240  return nullptr;
4241 
4242  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4243 
4244  // If the original type is variadic, or if any of the component types
4245  // disagree, we cannot remove the cast.
4246  if (OrigFT->isVarArg() ||
4247  OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4248  OrigFT->getReturnType() != CalleeFT->getReturnType())
4249  return nullptr;
4250 
4251  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4252  if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4253  return nullptr;
4254 
4255  return OrigFn;
4256  };
4257 
4258  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4259  CalleePtr = OrigFn;
4260  IRFuncTy = OrigFn->getFunctionType();
4261  }
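// [Editorial example, assumed] the simplification above fires for
// unprototyped functions:
//
//   void f();   // no prototype
//   f(42);      // called at type void (...)
//
// Without it, the call would go through
// 'bitcast (void ()* @f to void (...)*)', which among other things would
// defeat always_inline at -O0; stripping the cast lets @f be called directly.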
4262 
4263  // 3. Perform the actual call.
4264 
4265  // Deactivate any cleanups that we're supposed to do immediately before
4266  // the call.
4267  if (!CallArgs.getCleanupsToDeactivate().empty())
4268  deactivateArgCleanupsBeforeCall(*this, CallArgs);
4269 
4270  // Assert that the arguments we computed match up. The IR verifier
4271  // will catch this, but this is a common enough source of problems
4272  // during IRGen changes that it's way better for debugging to catch
4273  // it ourselves here.
4274 #ifndef NDEBUG
4275  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4276  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4277  // The inalloca argument can have a different type.
4278  if (IRFunctionArgs.hasInallocaArg() &&
4279  i == IRFunctionArgs.getInallocaArgNo())
4280  continue;
4281  if (i < IRFuncTy->getNumParams())
4282  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4283  }
4284 #endif
4285 
4286  // Update the largest vector width if any arguments have vector types.
4287  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4288  if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4289  LargestVectorWidth = std::max(LargestVectorWidth,
4290  VT->getPrimitiveSizeInBits());
4291  }
4292 
4293  // Compute the calling convention and attributes.
4294  unsigned CallingConv;
4295  llvm::AttributeList Attrs;
4296  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4297  Callee.getAbstractInfo(), Attrs, CallingConv,
4298  /*AttrOnCallSite=*/true);
4299 
4300  // Apply some call-site-specific attributes.
4301  // TODO: work this into building the attribute set.
4302 
4303  // Apply always_inline to all calls within flatten functions.
4304  // FIXME: should this really take priority over __try, below?
4305  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4306  !(Callee.getAbstractInfo().getCalleeDecl().getDecl() &&
4307  Callee.getAbstractInfo()
4308  .getCalleeDecl()
4309  .getDecl()
4310  ->hasAttr<NoInlineAttr>())) {
4311  Attrs =
4312  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4313  llvm::Attribute::AlwaysInline);
4314  }
4315 
4316  // Disable inlining inside SEH __try blocks.
4317  if (isSEHTryScope()) {
4318  Attrs =
4319  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4320  llvm::Attribute::NoInline);
4321  }
4322 
4323  // Decide whether to use a call or an invoke.
4324  bool CannotThrow;
4325  if (currentFunctionUsesSEHTry()) {
4326  // SEH cares about asynchronous exceptions, so everything can "throw."
4327  CannotThrow = false;
4328  } else if (isCleanupPadScope() &&
4329  EHPersonality::get(*this).isMSVCXXPersonality()) {
4330  // The MSVC++ personality will implicitly terminate the program if an
4331  // exception is thrown during a cleanup outside of a try/catch.
4332  // We don't need to model anything in IR to get this behavior.
4333  CannotThrow = true;
4334  } else {
4335  // Otherwise, nounwind call sites will never throw.
4336  CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4337  llvm::Attribute::NoUnwind);
4338  }
4339 
4340  // If we made a temporary, be sure to clean up after ourselves. Note that we
4341  // can't depend on being inside of an ExprWithCleanups, so we need to manually
4342  // pop this cleanup later on. Being eager about this is OK, since this
4343  // temporary is 'invisible' outside of the callee.
4344  if (UnusedReturnSizePtr)
4345  pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4346  UnusedReturnSizePtr);
4347 
4348  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4349 
4351  getBundlesForFunclet(CalleePtr);
4352 
4353  // Emit the actual call/invoke instruction.
4354  llvm::CallBase *CI;
4355  if (!InvokeDest) {
4356  CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
4357  } else {
4358  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4359  CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
4360  BundleList);
4361  EmitBlock(Cont);
4362  }
4363  if (callOrInvoke)
4364  *callOrInvoke = CI;
4365 
4366  // Apply the attributes and calling convention.
4367  CI->setAttributes(Attrs);
4368  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4369 
4370  // Apply various metadata.
4371 
4372  if (!CI->getType()->isVoidTy())
4373  CI->setName("call");
4374 
4375  // Update largest vector width from the return type.
4376  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4377  LargestVectorWidth = std::max(LargestVectorWidth,
4378  VT->getPrimitiveSizeInBits());
4379 
4380  // Insert instrumentation or attach profile metadata at indirect call sites.
4381  // For more details, see the comment before the definition of
4382  // IPVK_IndirectCallTarget in InstrProfData.inc.
4383  if (!CI->getCalledFunction())
4384  PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4385  CI, CalleePtr);
4386 
4387  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4388  // optimizer it can aggressively ignore unwind edges.
4389  if (CGM.getLangOpts().ObjCAutoRefCount)
4390  AddObjCARCExceptionMetadata(CI);
4391 
4392  // Suppress tail calls if requested.
4393  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4394  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4395  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4396  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4397  }
4398 
4399  // 4. Finish the call.
4400 
4401  // If the call doesn't return, finish the basic block and clear the
4402  // insertion point; this allows the rest of IRGen to discard
4403  // unreachable code.
4404  if (CI->doesNotReturn()) {
4405  if (UnusedReturnSizePtr)
4406  PopCleanupBlock();
4407 
4408  // Strip away the noreturn attribute to better diagnose unreachable UB.
4409  if (SanOpts.has(SanitizerKind::Unreachable)) {
4410  // Also remove from function since CallBase::hasFnAttr additionally checks
4411  // attributes of the called function.
4412  if (auto *F = CI->getCalledFunction())
4413  F->removeFnAttr(llvm::Attribute::NoReturn);
4414  CI->removeAttribute(llvm::AttributeList::FunctionIndex,
4415  llvm::Attribute::NoReturn);
4416 
4417  // Avoid incompatibility with ASan which relies on the `noreturn`
4418  // attribute to insert handler calls.
4419  if (SanOpts.hasOneOf(SanitizerKind::Address |
4420  SanitizerKind::KernelAddress)) {
4421  SanitizerScope SanScope(this);
4422  llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
4423  Builder.SetInsertPoint(CI);
4424  auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4425  llvm::FunctionCallee Fn =
4426  CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
4427  EmitNounwindRuntimeCall(Fn);
4428  }
4429  }
4430 
4431  EmitUnreachable(Loc);
4432  Builder.ClearInsertionPoint();
4433 
4434  // FIXME: For now, emit a dummy basic block because expr emitters
4435  // generally are not ready to handle emitting expressions at unreachable
4436  // points.
4437  EnsureInsertPoint();
4438 
4439  // Return a reasonable RValue.
4440  return GetUndefRValue(RetTy);
4441  }
4442 
4443  // Perform the swifterror writeback.
4444  if (swiftErrorTemp.isValid()) {
4445  llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4446  Builder.CreateStore(errorResult, swiftErrorArg);
4447  }
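// [Editorial note, not in the original source] this is the writeback half of
// the swifterror handling in the argument loop above: the caller's error slot
// was copied into a dedicated alloca marked 'swifterror' before the call
// (LLVM requires that only such an alloca or parameter be passed in the
// swifterror position), and the possibly-updated value is now copied back.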
4448 
4449  // Emit any call-associated writebacks immediately. Arguably this
4450  // should happen after any return-value munging.
4451  if (CallArgs.hasWritebacks())
4452  emitWritebacks(*this, CallArgs);
4453 
4454  // The stack cleanup for inalloca arguments has to run out of the normal
4455  // lexical order, so deactivate it and run it manually here.
4456  CallArgs.freeArgumentMemory(*this);
4457 
4458  // Extract the return value.
4459  RValue Ret = [&] {
4460  switch (RetAI.getKind()) {
4461  case ABIArgInfo::CoerceAndExpand: {
4462  auto coercionType = RetAI.getCoerceAndExpandType();
4463 
4464  Address addr = SRetPtr;
4465  addr = Builder.CreateElementBitCast(addr, coercionType);
4466 
4467  assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4468  bool requiresExtract = isa<llvm::StructType>(CI->getType());
4469 
4470  unsigned unpaddedIndex = 0;
4471  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4472  llvm::Type *eltType = coercionType->getElementType(i);
4473  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4474  Address eltAddr = Builder.CreateStructGEP(addr, i);
4475  llvm::Value *elt = CI;
4476  if (requiresExtract)
4477  elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4478  else
4479  assert(unpaddedIndex == 0);
4480  Builder.CreateStore(elt, eltAddr);
4481  }
4482  // FALLTHROUGH
4483  LLVM_FALLTHROUGH;
4484  }
4485 
4486  case ABIArgInfo::InAlloca:
4487  case ABIArgInfo::Indirect: {
4488  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4489  if (UnusedReturnSizePtr)
4490  PopCleanupBlock();
4491  return ret;
4492  }
4493 
4494  case ABIArgInfo::Ignore:
4495  // If we are ignoring an argument that had a result, make sure to
4496  // construct the appropriate return value for our caller.
4497  return GetUndefRValue(RetTy);
4498 
4499  case ABIArgInfo::Extend:
4500  case ABIArgInfo::Direct: {
4501  llvm::Type *RetIRTy = ConvertType(RetTy);
4502  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4503  switch (getEvaluationKind(RetTy)) {
4504  case TEK_Complex: {
4505  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4506  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4507  return RValue::getComplex(std::make_pair(Real, Imag));
4508  }
4509  case TEK_Aggregate: {
4510  Address DestPtr = ReturnValue.getValue();
4511  bool DestIsVolatile = ReturnValue.isVolatile();
4512 
4513  if (!DestPtr.isValid()) {
4514  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4515  DestIsVolatile = false;
4516  }
4517  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4518  return RValue::getAggregate(DestPtr);
4519  }
4520  case TEK_Scalar: {
4521  // If the argument doesn't match, perform a bitcast to coerce it. This
4522  // can happen due to trivial type mismatches.
4523  llvm::Value *V = CI;
4524  if (V->getType() != RetIRTy)
4525  V = Builder.CreateBitCast(V, RetIRTy);
4526  return RValue::get(V);
4527  }
4528  }
4529  llvm_unreachable("bad evaluation kind");
4530  }
4531 
4532  Address DestPtr = ReturnValue.getValue();
4533  bool DestIsVolatile = ReturnValue.isVolatile();
4534 
4535  if (!DestPtr.isValid()) {
4536  DestPtr = CreateMemTemp(RetTy, "coerce");
4537  DestIsVolatile = false;
4538  }
4539 
4540  // If the value is offset in memory, apply the offset now.
4541  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4542  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4543 
4544  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4545  }
4546 
4547  case ABIArgInfo::Expand:
4548  llvm_unreachable("Invalid ABI kind for return argument");
4549  }
4550 
4551  llvm_unreachable("Unhandled ABIArgInfo::Kind");
4552  } ();
4553 
4554  // Emit the assume_aligned check on the return value.
4555  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4556  if (Ret.isScalar() && TargetDecl) {
4557  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4558  llvm::Value *OffsetValue = nullptr;
4559  if (const auto *Offset = AA->getOffset())
4560  OffsetValue = EmitScalarExpr(Offset);
4561 
4562  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4563  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4564  EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4565  AlignmentCI->getZExtValue(), OffsetValue);
4566  } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4567  llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
4568  .getRValue(*this)
4569  .getScalarVal();
4570  EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4571  AlignmentVal);
4572  }
4573  }
4574 
4575  return Ret;
4576 }
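// [Editorial example, hypothetical] the return-value alignment handling at
// the end of EmitCall means that for
//
//   void *my_alloc(size_t n) __attribute__((assume_aligned(64)));
//
// every call result is followed by an alignment assumption (lowered via
// llvm.assume), and __attribute__((alloc_align(N))) does the same with the
// alignment value taken from the N-th argument of the call.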
4577 
4578 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4579  if (isVirtual()) {
4580  const CallExpr *CE = getVirtualCallExpr();
4581  return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4582  CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
4583  CE ? CE->getBeginLoc() : SourceLocation());
4584  }
4585 
4586  return *this;
4587 }
4588 
4589 /* VarArg handling */
4590 
4591 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4592  VAListAddr = VE->isMicrosoftABI()
4593  ? EmitMSVAListRef(VE->getSubExpr())
4594  : EmitVAListRef(VE->getSubExpr());
4595  QualType Ty = VE->getType();
4596  if (VE->isMicrosoftABI())
4597  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4598  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4599 }
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:1918
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
Definition: CGCall.cpp:2197
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
Definition: Type.h:6146
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:115
static int getExpansionSize(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:967
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:689
const ParmVarDecl * getParamDecl(unsigned I) const
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i...
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
Definition: CGObjC.cpp:2105
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:377
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2491
virtual AddedStructorArgs buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters...
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
Definition: CGCall.cpp:2177
bool hasWritebacks() const
Definition: CGCall.h:314
Default closure variant of a ctor.
Definition: ABI.h:29
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
Represents a variable declaration or definition.
Definition: Decl.h:812
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:112
llvm::Instruction * getStackBase() const
Definition: CGCall.h:336
unsigned getNumParams() const
Definition: Type.h:3898
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
llvm::Value * getFunctionPointer() const
Definition: CGCall.h:183
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
Definition: CGCall.cpp:1225
const T * getAs() const
Member-template getAs<specific type>&#39;.
Definition: Type.h:6766
void setCoerceToType(llvm::Type *T)
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:3554
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:138
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:175
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition: CGCall.cpp:3372
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:244
llvm::Value * getPointer() const
Definition: Address.h:37
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:305
Address getValue() const
Definition: CGCall.h:383
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type...
Definition: CGCall.cpp:193
Represents a parameter to a function.
Definition: Decl.h:1549
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:56
void add(RValue rvalue, QualType type)
Definition: CGCall.h:287
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM callilng convention.
Definition: CGCall.cpp:44
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Definition: TargetInfo.cpp:418
Represents a struct/union/class.
Definition: Decl.h:3592
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3364
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:358
An object to manage conditionally-evaluated expressions.
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2465
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1035
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:3200
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2806
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.h:4007
Address getAddress() const
Definition: CGValue.h:326
unsigned getRegParm() const
Definition: Type.h:3528
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:154
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:4073
field_range fields() const
Definition: Decl.h:3783
bool isVolatileQualified() const
Definition: CGValue.h:257
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2335
Represents a member of a struct/union/class.
Definition: Decl.h:2578
CharUnits getAlignment() const
Definition: CGValue.h:315
RequiredArgs getRequiredArgs() const
bool isUsingInAlloca() const
Returns if we&#39;re using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:341
unsigned getFunctionScopeIndex() const
Returns the index of this parameter in its prototype or method scope.
Definition: Decl.h:1602
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:102
bool isOrdinary() const
Definition: CGCall.h:174
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:265
CharUnits getArgStructAlignment() const
bool isReferenceType() const
Definition: Type.h:6318
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2325
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that&#39;s being passed call-by-writeback.
Definition: CGCall.cpp:3228
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:513
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:52
bool isVirtual() const
Definition: CGCall.h:192
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Decl.h:738
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:323
bool getProducesResult() const
Definition: Type.h:3523
bool isGLValue() const
Definition: Expr.h:254
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:284
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment...
static bool hasScalarEvaluationKind(QualType T)
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2599
void copyInto(CodeGenFunction &CGF, Address A) const
Definition: CGCall.cpp:3562
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:156
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:37
llvm::StructType * getCoerceAndExpandType() const
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
bool hasConstructorVariants() const
Does this ABI have different entrypoints for complete-object and base-subobject constructors?
Definition: TargetCXXABI.h:214
Wrapper for source info for functions.
Definition: TypeLoc.h:1326
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:66
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:108
unsigned getInAllocaFieldIndex() const
const_arg_iterator arg_begin() const
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:70
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:395
LangAS getAddressSpace() const
Definition: Type.h:353
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:1832
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:274
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:142
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
Definition: CGCall.cpp:476
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
Definition: CGCall.cpp:709
Values of this type can never be null.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:84
bool isSimple() const
Definition: CGValue.h:251
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:278
bool isInstance() const
Definition: DeclCXX.h:2076
An ordinary object is located at an address in memory.
Definition: Specifiers.h:125
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1707
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:105
FunctionType::ExtInfo getExtInfo() const
QualType getReturnType() const
Definition: DeclObjC.h:322
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:744
bool getNoReturn() const
Definition: Type.h:3522
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:83
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:70
bool getNoCallerSavedRegs() const
Definition: Type.h:3524
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
Definition: CGCall.cpp:3579
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:518
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Definition: TargetInfo.h:304
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:3581
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
Definition: CGCall.cpp:507
Represents a K&R-style &#39;int foo()&#39; function, which has no information available about its arguments...
Definition: Type.h:3660
bool hasAttr() const
Definition: DeclBase.h:533
CanQualType getReturnType() const
bool isValid() const
Definition: Address.h:35
unsigned getNumRequiredArgs() const
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1635
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3697
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:4143
const TargetCodeGenInfo & getTargetCodeGenInfo()
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
writeback_const_range writebacks() const
Definition: CGCall.h:319
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:308
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
Definition: CGCall.cpp:3076
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:178
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4120
Address Temporary
The temporary alloca.
Definition: CGCall.h:273
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns &#39;th...
Definition: CGCXXABI.h:106
unsigned Offset
Definition: Format.cpp:1640
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:276
ExtParameterInfo withIsNoEscape(bool NoEscape) const
Definition: Type.h:3431
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:3055
This represents one expression.
Definition: Expr.h:108
bool isVariadic() const
Whether this function is variadic.
Definition: Decl.cpp:2688
static Address invalid()
Definition: Address.h:34
llvm::Type * getUnpaddedCoerceAndExpandType() const
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:3050
bool useObjCFPRetForRealType(RealType T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
Definition: TargetInfo.h:718
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:96
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:65
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type &#39;void ()&#39;.
Definition: CGCall.cpp:702
bool getHasRegParm() const
Definition: Type.h:3526
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:6829
bool isObjCRetainableType() const
Definition: Type.cpp:4000
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2713
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:43
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:62
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:361
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2587
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition: CGCall.cpp:3703
A class for recording the number of arguments that a function signature requires. ...
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when &#39;sret&#39; is used as a return type...
Definition: CGCall.cpp:1510
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:647
QualType getType() const
Definition: Expr.h:130
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1351
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes &#39;this&#39; as the first parameter followed by varargs.
Definition: CGCall.cpp:537
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2744
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:196
void Profile(llvm::FoldingSetNodeID &ID)
UnaryOperator - This represents the unary-expression&#39;s (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1929
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:2025
ASTContext & getContext() const
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:413
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1171
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:235
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:34
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1277
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:414
CanQualType getCanonicalTypeUnqualified() const
LValue getKnownLValue() const
Definition: CGCall.h:242
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Definition: Type.h:4390
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store to.
Definition: CGCall.cpp:1298
Enumerates target-specific builtins in their own namespaces within namespace clang.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:141
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:165
Kind
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses &#39;fpret&#39; when used as a return type.
Definition: CGCall.cpp:1515
CanProxy< U > castAs() const
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:3217
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant...
Definition: Expr.cpp:3433
Encodes a location in the source.
QualType getReturnType() const
Definition: Type.h:3623
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2212
A saved depth on the scope stack.
Definition: EHScopeStack.h:106
llvm::FunctionType * getVirtualFunctionType() const
Definition: CGCall.h:207
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
Definition: CGCall.cpp:3801
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:295
ParameterABI getABI() const
Return the ABI treatment of this parameter.
Definition: Type.h:3404
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1239
CallingConv getCC() const
Definition: Type.h:3535
const Decl * getDecl() const
Definition: GlobalDecl.h:68
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C &#39;SEL&#39; type.
Definition: ASTContext.h:1858
An aggregate value slot.
Definition: CGValue.h:436
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
Definition: CGCall.cpp:463
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2048
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2426
const_arg_iterator arg_end() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
ObjCEntrypoints & getObjCEntrypoints() const
CoerceAndExpand - Only valid for aggregate argument types.
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:3356
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:727
CanQualType VoidTy
Definition: ASTContext.h:1015
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
bool isAnyPointerType() const
Definition: Type.h:6310
An aligned address.
Definition: Address.h:24
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after...
Definition: Type.h:1159
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Definition: TargetInfo.h:724
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:179
All available information about a concrete callee.
Definition: CGCall.h:66
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:369
Complete object dtor.
Definition: ABI.h:35
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses &#39;fp2ret&#39; when used as a return type.
Definition: CGCall.cpp:1532
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1693
bool hasFlexibleArrayMember() const
Definition: Decl.h:3646
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3932
CXXCtorType
C++ constructor types.
Definition: ABI.h:24
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
Definition: CGCall.cpp:628
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:553
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1135
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:358
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:58
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
const TargetInfo & getTarget() const
Definition: CodeGenTypes.h:177
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value *> Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3771
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2397
ExtInfo getExtInfo() const
Definition: Type.h:3634
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:92
CodeGenFunction::ComplexPairTy ComplexPairTy
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:90
LValue Source
The original argument.
Definition: CGCall.h:270
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type...
Definition: CGCall.cpp:439
Qualifiers getMethodQualifiers() const
Definition: DeclCXX.h:2195
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:90
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:1009
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Interesting information about a specific parameter that can&#39;t simply be reflected in parameter&#39;s type...
Definition: Type.h:3391
void EmitARCIntrinsicUse(ArrayRef< llvm::Value *> values)
Given a number of pointers, inform the optimizer that they&#39;re being intrinsically used up until this ...
Definition: CGObjC.cpp:1931
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:69
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2173
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
RValue getRValue(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3552
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:803
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:107
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Definition: TargetInfo.cpp:399
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value *> args)
Emits a call or invoke to the given noreturn runtime function.
Definition: CGCall.cpp:3730
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:4380
Complex values, per C99 6.2.5p11.
Definition: Type.h:2487
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:75
Iterator for iterating over Stmt * arrays that contain only T *.
Definition: Stmt.h:1002
static bool classof(const OMPClause *T)
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:2043
QualType getCanonicalTypeInternal() const
Definition: Type.h:2365
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:6592
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable &#39;self&#39;, remove it.
Definition: CGCall.cpp:2687
CharUnits getIndirectAlign() const
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
T * getAttr() const
Definition: DeclBase.h:529
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:51
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:644
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:118
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:61