//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}
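
// As a rough illustration: on 32-bit x86 a declaration such as
//   void f(int) __attribute__((fastcall));
// carries CC_X86FastCall, which this function maps to
// llvm::CallingConv::X86_FastCall, printed in IR as "x86_fastcallcc".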

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification.
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD,
                               const CXXMethodDecl *MD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getTypeQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
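
// For example, given
//   struct S { void f() const; };
// the 'this' type computed here is 'S *', not 'const S *': method CVR
// qualifiers don't affect how the implicit argument is passed, so only the
// address space (if any) is kept.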

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}
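
// For instance, a parameter declared as
//   void fill(char *buf __attribute__((pass_object_size(0))));
// contributes two entries to the prefix: the pointer itself plus an implicit
// size_t argument carrying the object size, which is why the loop above may
// push more types than FPT->getNumParams().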

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}
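
// Note the deliberate inversion for the ABI-override attributes above:
// __attribute__((ms_abi)) yields plain CC_C when already targeting Windows,
// and __attribute__((sysv_abi)) yields CC_C on non-Windows targets, since in
// each case the requested convention is already the platform default.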

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD, MD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent(), MD));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(MD, Type, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent(), MD) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
    // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}
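
// For example, for a call like printf("%d %d", x, y) against the variadic
// prototype int printf(const char *, ...), 'required' becomes RequiredArgs(1)
// (just the format string), and the two trailing integers are lowered under
// the variadic rules.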

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}
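
// This is what adapts a variadic signature to a concrete call site: given a
// signature arranged for int f(int, ...) and the call f(1, 2.5), the result
// gains a second, variadic double argument while keeping the signature's
// original RequiredArgs of 1.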

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}
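
// Because results are interned in the FunctionInfos folding set, arranging
// the same signature (result type, argument types, convention, required
// args) twice yields a reference to the very same CGFunctionInfo object.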

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}
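
// As an illustration, the type
//   struct P { int xy[2]; _Complex float c; };
// expands to four scalars - xy[0], xy[1], real(c) and imag(c) - so its
// expansion size below is 4 and ABIArgInfo::Expand lowers it to four IR
// arguments.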

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
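
// For example, with SrcSTy = { { i32, i32 }, i8 } and DstSize = 8: the outer
// dive enters the first element (store size 8 >= DstSize), but the inner i32
// (store size 4) is too small to enter, so the result points at the inner
// { i32, i32 }.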

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
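
// Concretely, coercing an i32 holding 0xAABBCCDD down to i16 yields 0xAABB
// on a big-endian target (the high bits, as a load from the same memory
// would see) but 0xCCDD on a little-endian one.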

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
  Address SrcCasted = CGF.Builder.CreateElementBitCast(Src, CGF.Int8Ty);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}
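
// E.g. a struct { float x; float y; } that the ABI coerces to <2 x float>
// takes the bitcast-and-load path above, since both types occupy 8 bytes;
// the memcpy-through-temporary fallback only runs when the source is
// strictly smaller than the destination type.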

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
    Address DstCasted = CGF.Builder.CreateElementBitCast(Dst, CGF.Int8Ty);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  const auto &RI = FI.getReturnInfo();
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}
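
// These predicates feed the Objective-C message-send lowering: on 32-bit
// x86, for example, methods returning float, double or long double are
// dispatched via objc_msgSend_fpret instead of plain objc_msgSend.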

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}
1547 
1548 llvm::FunctionType *
1550 
1551  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1552  (void)Inserted;
1553  assert(Inserted && "Recursively being processed?");
1554 
1555  llvm::Type *resultType = nullptr;
1556  const ABIArgInfo &retAI = FI.getReturnInfo();
1557  switch (retAI.getKind()) {
1558  case ABIArgInfo::Expand:
1559  llvm_unreachable("Invalid ABI kind for return argument");
1560 
1561  case ABIArgInfo::Extend:
1562  case ABIArgInfo::Direct:
1563  resultType = retAI.getCoerceToType();
1564  break;
1565 
1566  case ABIArgInfo::InAlloca:
1567  if (retAI.getInAllocaSRet()) {
1568  // sret things on win32 aren't void, they return the sret pointer.
1569  QualType ret = FI.getReturnType();
1570  llvm::Type *ty = ConvertType(ret);
1571  unsigned addressSpace = Context.getTargetAddressSpace(ret);
1572  resultType = llvm::PointerType::get(ty, addressSpace);
1573  } else {
1574  resultType = llvm::Type::getVoidTy(getLLVMContext());
1575  }
1576  break;
1577 
1578  case ABIArgInfo::Indirect:
1579  case ABIArgInfo::Ignore:
1580  resultType = llvm::Type::getVoidTy(getLLVMContext());
1581  break;
1582 
1584  resultType = retAI.getUnpaddedCoerceAndExpandType();
1585  break;
1586  }
1587 
1588  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1589  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1590 
1591  // Add type for sret argument.
1592  if (IRFunctionArgs.hasSRetArg()) {
1593  QualType Ret = FI.getReturnType();
1594  llvm::Type *Ty = ConvertType(Ret);
1595  unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1596  ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1597  llvm::PointerType::get(Ty, AddressSpace);
1598  }
1599 
1600  // Add type for inalloca argument.
1601  if (IRFunctionArgs.hasInallocaArg()) {
1602  auto ArgStruct = FI.getArgStruct();
1603  assert(ArgStruct);
1604  ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1605  }
1606 
1607  // Add in all of the required arguments.
1608  unsigned ArgNo = 0;
1610  ie = it + FI.getNumRequiredArgs();
1611  for (; it != ie; ++it, ++ArgNo) {
1612  const ABIArgInfo &ArgInfo = it->info;
1613 
1614  // Insert a padding type to ensure proper alignment.
1615  if (IRFunctionArgs.hasPaddingArg(ArgNo))
1616  ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1617  ArgInfo.getPaddingType();
1618 
1619  unsigned FirstIRArg, NumIRArgs;
1620  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1621 
1622  switch (ArgInfo.getKind()) {
1623  case ABIArgInfo::Ignore:
1624  case ABIArgInfo::InAlloca:
1625  assert(NumIRArgs == 0);
1626  break;
1627 
1628  case ABIArgInfo::Indirect: {
1629  assert(NumIRArgs == 1);
1630  // indirect arguments are always on the stack, which is alloca addr space.
1631  llvm::Type *LTy = ConvertTypeForMem(it->type);
1632  ArgTypes[FirstIRArg] = LTy->getPointerTo(
1633  CGM.getDataLayout().getAllocaAddrSpace());
1634  break;
1635  }
1636 
1637  case ABIArgInfo::Extend:
1638  case ABIArgInfo::Direct: {
1639  // Fast-isel and the optimizer generally like scalar values better than
1640  // FCAs, so we flatten them if this is safe to do for this argument.
1641  llvm::Type *argType = ArgInfo.getCoerceToType();
1642  llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1643  if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1644  assert(NumIRArgs == st->getNumElements());
1645  for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1646  ArgTypes[FirstIRArg + i] = st->getElementType(i);
1647  } else {
1648  assert(NumIRArgs == 1);
1649  ArgTypes[FirstIRArg] = argType;
1650  }
1651  break;
1652  }
1653 
1654  case ABIArgInfo::CoerceAndExpand: {
1655  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1656  for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1657  *ArgTypesIter++ = EltTy;
1658  }
1659  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1660  break;
1661  }
1662 
1663  case ABIArgInfo::Expand:
1664  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1665  getExpandedTypes(it->type, ArgTypesIter);
1666  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1667  break;
1668  }
1669  }
1670 
1671  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1672  assert(Erased && "Not in set?");
1673 
1674  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1675 }
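// Editorial sketch (not part of the original file): for a prototype such as
//
//   struct Big { long a, b, c, d; };
//   struct Big f(int x);
//
// a target that returns Big indirectly would take the hasSRetArg() path above
// and produce an IR signature roughly like
//
//   define void @f(%struct.Big* sret, i32)
//
// while the Direct case covers the plain i32 parameter. The exact shape is
// target- and ABI-dependent.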
1676 
1677 llvm::FunctionType *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1678  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1679  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1680 
1681  if (!isFuncTypeConvertible(FPT))
1682  return llvm::StructType::get(getLLVMContext());
1683 
1684  const CGFunctionInfo *Info;
1685  if (isa<CXXDestructorDecl>(MD))
1686  Info =
1687  &arrangeCXXStructorDeclaration(MD, StructorType::Complete);
1688  else
1689  Info = &arrangeCXXMethodDeclaration(MD);
1690  return GetFunctionType(*Info);
1691 }
1692 
1693 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1694  llvm::AttrBuilder &FuncAttrs,
1695  const FunctionProtoType *FPT) {
1696  if (!FPT)
1697  return;
1698 
1699  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1700  FPT->isNothrow())
1701  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1702 }
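// Illustrative example (assumption, not from the original file): a declaration
//
//   void g() noexcept;
//
// carries a FunctionProtoType whose exception specification is resolved and
// for which isNothrow() is true, so the helper above marks g() `nounwind`.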
1703 
1704 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1705  bool AttrOnCallSite,
1706  llvm::AttrBuilder &FuncAttrs) {
1707  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1708  if (!HasOptnone) {
1709  if (CodeGenOpts.OptimizeSize)
1710  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1711  if (CodeGenOpts.OptimizeSize == 2)
1712  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1713  }
1714 
1715  if (CodeGenOpts.DisableRedZone)
1716  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1717  if (CodeGenOpts.IndirectTlsSegRefs)
1718  FuncAttrs.addAttribute("indirect-tls-seg-refs");
1719  if (CodeGenOpts.NoImplicitFloat)
1720  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1721 
1722  if (AttrOnCallSite) {
1723  // Attributes that should go on the call site only.
1724  if (!CodeGenOpts.SimplifyLibCalls ||
1725  CodeGenOpts.isNoBuiltinFunc(Name.data()))
1726  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1727  if (!CodeGenOpts.TrapFuncName.empty())
1728  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1729  } else {
1730  // Attributes that should go on the function, but not the call site.
1731  if (!CodeGenOpts.DisableFPElim) {
1732  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1733  } else if (CodeGenOpts.OmitLeafFramePointer) {
1734  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1735  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1736  } else {
1737  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1738  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1739  }
1740 
1741  FuncAttrs.addAttribute("less-precise-fpmad",
1742  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1743 
1744  if (CodeGenOpts.NullPointerIsValid)
1745  FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1746  if (!CodeGenOpts.FPDenormalMode.empty())
1747  FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1748 
1749  FuncAttrs.addAttribute("no-trapping-math",
1750  llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1751 
1752  // Strict (compliant) code is the default, so only add this attribute to
1753  // indicate that we are trying to work around a problem case.
1754  if (!CodeGenOpts.StrictFloatCastOverflow)
1755  FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1756 
1757  // TODO: Are these all needed?
1758  // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1759  FuncAttrs.addAttribute("no-infs-fp-math",
1760  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1761  FuncAttrs.addAttribute("no-nans-fp-math",
1762  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1763  FuncAttrs.addAttribute("unsafe-fp-math",
1764  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1765  FuncAttrs.addAttribute("use-soft-float",
1766  llvm::toStringRef(CodeGenOpts.SoftFloat));
1767  FuncAttrs.addAttribute("stack-protector-buffer-size",
1768  llvm::utostr(CodeGenOpts.SSPBufferSize));
1769  FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1770  llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1771  FuncAttrs.addAttribute(
1772  "correctly-rounded-divide-sqrt-fp-math",
1773  llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1774 
1775  if (getLangOpts().OpenCL)
1776  FuncAttrs.addAttribute("denorms-are-zero",
1777  llvm::toStringRef(CodeGenOpts.FlushDenorm));
1778 
1779  // TODO: Reciprocal estimate codegen options should apply to instructions?
1780  const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1781  if (!Recips.empty())
1782  FuncAttrs.addAttribute("reciprocal-estimates",
1783  llvm::join(Recips, ","));
1784 
1785  if (!CodeGenOpts.PreferVectorWidth.empty() &&
1786  CodeGenOpts.PreferVectorWidth != "none")
1787  FuncAttrs.addAttribute("prefer-vector-width",
1788  CodeGenOpts.PreferVectorWidth);
1789 
1790  if (CodeGenOpts.StackRealignment)
1791  FuncAttrs.addAttribute("stackrealign");
1792  if (CodeGenOpts.Backchain)
1793  FuncAttrs.addAttribute("backchain");
1794 
1795  if (CodeGenOpts.SpeculativeLoadHardening)
1796  FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1797  }
1798 
1799  if (getLangOpts().assumeFunctionsAreConvergent()) {
1800  // Conservatively, mark all functions and calls in CUDA and OpenCL as
1801  // convergent (meaning, they may call an intrinsically convergent op, such
1802  // as __syncthreads() / barrier(), and so can't have certain optimizations
1803  // applied around them). LLVM will remove this attribute where it safely
1804  // can.
1805  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1806  }
1807 
1808  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1809  // Exceptions aren't supported in CUDA device code.
1810  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1811 
1812  // Respect -fcuda-flush-denormals-to-zero.
1813  if (CodeGenOpts.FlushDenorm)
1814  FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1815  }
1816 
1817  for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1818  StringRef Var, Value;
1819  std::tie(Var, Value) = Attr.split('=');
1820  FuncAttrs.addAttribute(Var, Value);
1821  }
1822 }
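// Rough sketch of the result (editorial illustration): when compiling with
// -Oz, a definition would typically pick up attributes along the lines of
//
//   optsize minsize "no-frame-pointer-elim"="true"
//   "less-precise-fpmad"="false" "stack-protector-buffer-size"="8" ...
//
// with the precise set of string attributes determined by the CodeGenOpts
// in effect.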
1823 
1824 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1825  llvm::AttrBuilder FuncAttrs;
1826  ConstructDefaultFnAttrList(F.getName(),
1827  F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1828  /* AttrOnCallsite = */ false, FuncAttrs);
1829  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1830 }
1831 
1832 void CodeGenModule::ConstructAttributeList(
1833  StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1834  llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1835  llvm::AttrBuilder FuncAttrs;
1836  llvm::AttrBuilder RetAttrs;
1837 
1838  CallingConv = FI.getEffectiveCallingConvention();
1839  if (FI.isNoReturn())
1840  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1841 
1842  // If we have information about the function prototype, we can learn
1843  // attributes from there.
1844  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1845  CalleeInfo.getCalleeFunctionProtoType());
1846 
1847  const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1848 
1849  bool HasOptnone = false;
1850  // FIXME: handle sseregparm someday...
1851  if (TargetDecl) {
1852  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1853  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1854  if (TargetDecl->hasAttr<NoThrowAttr>())
1855  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1856  if (TargetDecl->hasAttr<NoReturnAttr>())
1857  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1858  if (TargetDecl->hasAttr<ColdAttr>())
1859  FuncAttrs.addAttribute(llvm::Attribute::Cold);
1860  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1861  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1862  if (TargetDecl->hasAttr<ConvergentAttr>())
1863  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1864 
1865  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1866  AddAttributesFromFunctionProtoType(
1867  getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1868  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1869  // These attributes are not inherited by overriders.
1870  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1871  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1872  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1873  }
1874 
1875  // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1876  if (TargetDecl->hasAttr<ConstAttr>()) {
1877  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1878  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1879  } else if (TargetDecl->hasAttr<PureAttr>()) {
1880  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1881  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1882  } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1883  FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1884  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1885  }
1886  if (TargetDecl->hasAttr<RestrictAttr>())
1887  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1888  if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1889  !CodeGenOpts.NullPointerIsValid)
1890  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1891  if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1892  FuncAttrs.addAttribute("no_caller_saved_registers");
1893  if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1894  FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1895 
1896  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1897  if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1898  Optional<unsigned> NumElemsParam;
1899  if (AllocSize->getNumElemsParam().isValid())
1900  NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1901  FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1902  NumElemsParam);
1903  }
1904  }
1905 
1906  ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1907 
1908  // This must run after constructing the default function attribute list
1909  // to ensure that the speculative load hardening attribute is removed
1910  // in the case where the -mspeculative-load-hardening flag was passed.
1911  if (TargetDecl) {
1912  if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
1913  FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
1914  if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1915  FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1916  }
1917 
1918  if (CodeGenOpts.EnableSegmentedStacks &&
1919  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1920  FuncAttrs.addAttribute("split-stack");
1921 
1922  // Add NonLazyBind attribute to function declarations when -fno-plt
1923  // is used.
1924  if (TargetDecl && CodeGenOpts.NoPLT) {
1925  if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1926  if (!Fn->isDefined() && !AttrOnCallSite) {
1927  FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1928  }
1929  }
1930  }
1931 
1932  if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1933  if (getLangOpts().OpenCLVersion <= 120) {
1934  // In OpenCL v1.2, work groups are always uniform.
1935  FuncAttrs.addAttribute("uniform-work-group-size", "true");
1936  } else {
1937  // In OpenCL v2.0, work groups may or may not be uniform.
1938  // The '-cl-uniform-work-group-size' compile option provides a hint
1939  // to the compiler that the global work-size is a multiple of
1940  // the work-group size specified to clEnqueueNDRangeKernel
1941  // (i.e. work groups are uniform).
1942  FuncAttrs.addAttribute("uniform-work-group-size",
1943  llvm::toStringRef(CodeGenOpts.UniformWGSize));
1944  }
1945  }
1946 
1947  if (!AttrOnCallSite) {
1948  bool DisableTailCalls = false;
1949 
1950  if (CodeGenOpts.DisableTailCalls)
1951  DisableTailCalls = true;
1952  else if (TargetDecl) {
1953  if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1954  TargetDecl->hasAttr<AnyX86InterruptAttr>())
1955  DisableTailCalls = true;
1956  else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1957  if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1958  if (!BD->doesNotEscape())
1959  DisableTailCalls = true;
1960  }
1961  }
1962 
1963  FuncAttrs.addAttribute("disable-tail-calls",
1964  llvm::toStringRef(DisableTailCalls));
1965  GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1966  }
1967 
1968  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1969 
1970  QualType RetTy = FI.getReturnType();
1971  const ABIArgInfo &RetAI = FI.getReturnInfo();
1972  switch (RetAI.getKind()) {
1973  case ABIArgInfo::Extend:
1974  if (RetAI.isSignExt())
1975  RetAttrs.addAttribute(llvm::Attribute::SExt);
1976  else
1977  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1978  LLVM_FALLTHROUGH;
1979  case ABIArgInfo::Direct:
1980  if (RetAI.getInReg())
1981  RetAttrs.addAttribute(llvm::Attribute::InReg);
1982  break;
1983  case ABIArgInfo::Ignore:
1984  break;
1985 
1986  case ABIArgInfo::InAlloca:
1987  case ABIArgInfo::Indirect: {
1988  // inalloca and sret disable readnone and readonly
1989  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1990  .removeAttribute(llvm::Attribute::ReadNone);
1991  break;
1992  }
1993 
1994  case ABIArgInfo::CoerceAndExpand:
1995  break;
1996 
1997  case ABIArgInfo::Expand:
1998  llvm_unreachable("Invalid ABI kind for return argument");
1999  }
2000 
2001  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2002  QualType PTy = RefTy->getPointeeType();
2003  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2004  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2005  .getQuantity());
2006  else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2007  !CodeGenOpts.NullPointerIsValid)
2008  RetAttrs.addAttribute(llvm::Attribute::NonNull);
2009  }
2010 
2011  bool hasUsedSRet = false;
2012  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2013 
2014  // Attach attributes to sret.
2015  if (IRFunctionArgs.hasSRetArg()) {
2016  llvm::AttrBuilder SRETAttrs;
2017  if (!RetAI.getSuppressSRet())
2018  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2019  hasUsedSRet = true;
2020  if (RetAI.getInReg())
2021  SRETAttrs.addAttribute(llvm::Attribute::InReg);
2022  ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2023  llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2024  }
2025 
2026  // Attach attributes to inalloca argument.
2027  if (IRFunctionArgs.hasInallocaArg()) {
2028  llvm::AttrBuilder Attrs;
2029  Attrs.addAttribute(llvm::Attribute::InAlloca);
2030  ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2031  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2032  }
2033 
2034  unsigned ArgNo = 0;
2035  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2036  E = FI.arg_end();
2037  I != E; ++I, ++ArgNo) {
2038  QualType ParamType = I->type;
2039  const ABIArgInfo &AI = I->info;
2040  llvm::AttrBuilder Attrs;
2041 
2042  // Add attribute for padding argument, if necessary.
2043  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2044  if (AI.getPaddingInReg()) {
2045  ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2046  llvm::AttributeSet::get(
2047  getLLVMContext(),
2048  llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2049  }
2050  }
2051 
2052  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2053  // have the corresponding parameter variable. It doesn't make
2054  // sense to do it here because parameters are so messed up.
2055  switch (AI.getKind()) {
2056  case ABIArgInfo::Extend:
2057  if (AI.isSignExt())
2058  Attrs.addAttribute(llvm::Attribute::SExt);
2059  else
2060  Attrs.addAttribute(llvm::Attribute::ZExt);
2061  LLVM_FALLTHROUGH;
2062  case ABIArgInfo::Direct:
2063  if (ArgNo == 0 && FI.isChainCall())
2064  Attrs.addAttribute(llvm::Attribute::Nest);
2065  else if (AI.getInReg())
2066  Attrs.addAttribute(llvm::Attribute::InReg);
2067  break;
2068 
2069  case ABIArgInfo::Indirect: {
2070  if (AI.getInReg())
2071  Attrs.addAttribute(llvm::Attribute::InReg);
2072 
2073  if (AI.getIndirectByVal())
2074  Attrs.addAttribute(llvm::Attribute::ByVal);
2075 
2076  CharUnits Align = AI.getIndirectAlign();
2077 
2078  // In a byval argument, it is important that the required
2079  // alignment of the type is honored, as LLVM might be creating a
2080  // *new* stack object, and needs to know what alignment to give
2081  // it. (Sometimes it can deduce a sensible alignment on its own,
2082  // but not if clang decides it must emit a packed struct, or the
2083  // user specifies increased alignment requirements.)
2084  //
2085  // This is different from indirect *not* byval, where the object
2086  // exists already, and the align attribute is purely
2087  // informative.
2088  assert(!Align.isZero());
2089 
2090  // For now, only add this when we have a byval argument.
2091  // TODO: be less lazy about updating test cases.
2092  if (AI.getIndirectByVal())
2093  Attrs.addAlignmentAttr(Align.getQuantity());
2094 
2095  // byval disables readnone and readonly.
2096  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2097  .removeAttribute(llvm::Attribute::ReadNone);
2098  break;
2099  }
2100  case ABIArgInfo::Ignore:
2101  case ABIArgInfo::Expand:
2102  case ABIArgInfo::CoerceAndExpand:
2103  break;
2104 
2105  case ABIArgInfo::InAlloca:
2106  // inalloca disables readnone and readonly.
2107  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2108  .removeAttribute(llvm::Attribute::ReadNone);
2109  continue;
2110  }
2111 
2112  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2113  QualType PTy = RefTy->getPointeeType();
2114  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2115  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2116  .getQuantity());
2117  else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2118  !CodeGenOpts.NullPointerIsValid)
2119  Attrs.addAttribute(llvm::Attribute::NonNull);
2120  }
2121 
2122  switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2123  case ParameterABI::Ordinary:
2124  break;
2125 
2126  case ParameterABI::SwiftIndirectResult: {
2127  // Add 'sret' if we haven't already used it for something, but
2128  // only if the result is void.
2129  if (!hasUsedSRet && RetTy->isVoidType()) {
2130  Attrs.addAttribute(llvm::Attribute::StructRet);
2131  hasUsedSRet = true;
2132  }
2133 
2134  // Add 'noalias' in either case.
2135  Attrs.addAttribute(llvm::Attribute::NoAlias);
2136 
2137  // Add 'dereferenceable' and 'alignment'.
2138  auto PTy = ParamType->getPointeeType();
2139  if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2140  auto info = getContext().getTypeInfoInChars(PTy);
2141  Attrs.addDereferenceableAttr(info.first.getQuantity());
2142  Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2143  info.second.getQuantity()));
2144  }
2145  break;
2146  }
2147 
2148  case ParameterABI::SwiftErrorResult:
2149  Attrs.addAttribute(llvm::Attribute::SwiftError);
2150  break;
2151 
2152  case ParameterABI::SwiftContext:
2153  Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2154  break;
2155  }
2156 
2157  if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2158  Attrs.addAttribute(llvm::Attribute::NoCapture);
2159 
2160  if (Attrs.hasAttributes()) {
2161  unsigned FirstIRArg, NumIRArgs;
2162  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2163  for (unsigned i = 0; i < NumIRArgs; i++)
2164  ArgAttrs[FirstIRArg + i] =
2165  llvm::AttributeSet::get(getLLVMContext(), Attrs);
2166  }
2167  }
2168  assert(ArgNo == FI.arg_size());
2169 
2170  AttrList = llvm::AttributeList::get(
2171  getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2172  llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2173 }
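// Worked example (editorial, not part of the original source): for
//
//   __attribute__((const)) int sq(int x);
//
// the ConstAttr branch above adds `readnone` and `nounwind`, so the resulting
// declaration would look roughly like
//
//   declare i32 @sq(i32) #0   ; #0 = { nounwind readnone ... }
//
// together with whatever defaults ConstructDefaultFnAttrList contributed.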
2174 
2175 /// An argument came in as a promoted argument; demote it back to its
2176 /// declared type.
2177 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2178  const VarDecl *var,
2179  llvm::Value *value) {
2180  llvm::Type *varType = CGF.ConvertType(var->getType());
2181 
2182  // This can happen with promotions that actually don't change the
2183  // underlying type, like the enum promotions.
2184  if (value->getType() == varType) return value;
2185 
2186  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2187  && "unexpected promotion type");
2188 
2189  if (isa<llvm::IntegerType>(varType))
2190  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2191 
2192  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2193 }
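// Example of the demotion (editorial sketch): in a K&R-style definition
//
//   float f(x) float x; { return x; }
//
// the parameter is promoted to double at the ABI level, so the prologue
// receives a double and the FP path above narrows it back to float with an
// fptrunc; a short parameter promoted to int would take the CreateTrunc path.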
2194 
2195 /// Returns the attribute (either parameter attribute, or function
2196 /// attribute), which declares argument ArgNo to be non-null.
2197 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2198  QualType ArgType, unsigned ArgNo) {
2199  // FIXME: __attribute__((nonnull)) can also be applied to:
2200  // - references to pointers, where the pointee is known to be
2201  // nonnull (apparently a Clang extension)
2202  // - transparent unions containing pointers
2203  // In the former case, LLVM IR cannot represent the constraint. In
2204  // the latter case, we have no guarantee that the transparent union
2205  // is in fact passed as a pointer.
2206  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2207  return nullptr;
2208  // First, check attribute on parameter itself.
2209  if (PVD) {
2210  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2211  return ParmNNAttr;
2212  }
2213  // Check function attributes.
2214  if (!FD)
2215  return nullptr;
2216  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2217  if (NNAttr->isNonNull(ArgNo))
2218  return NNAttr;
2219  }
2220  return nullptr;
2221 }
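// Both spellings are covered (illustrative example, not from the original):
//
//   void h(int *p) __attribute__((nonnull(1)));     // on the function
//   void h(int *p __attribute__((nonnull)));        // on the parameter
//
// The parameter-level attribute is checked first; otherwise the
// function-level nonnull lists are scanned with isNonNull(ArgNo).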
2222 
2223 namespace {
2224  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2225  Address Temp;
2226  Address Arg;
2227  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2228  void Emit(CodeGenFunction &CGF, Flags flags) override {
2229  llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2230  CGF.Builder.CreateStore(errorValue, Arg);
2231  }
2232  };
2233 }
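// Why this cleanup exists (editorial note): LLVM permits only a restricted
// set of uses of a raw swifterror argument, so the prologue copies the
// incoming error pointer into an ordinary temporary; this cleanup copies the
// possibly-updated value back into the swifterror slot on normal exits.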
2234 
2235 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2236  llvm::Function *Fn,
2237  const FunctionArgList &Args) {
2238  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2239  // Naked functions don't have prologues.
2240  return;
2241 
2242  // If this is an implicit-return-zero function, go ahead and
2243  // initialize the return value. TODO: it might be nice to have
2244  // a more general mechanism for this that didn't require synthesized
2245  // return statements.
2246  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2247  if (FD->hasImplicitReturnZero()) {
2248  QualType RetTy = FD->getReturnType().getUnqualifiedType();
2249  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2250  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2251  Builder.CreateStore(Zero, ReturnValue);
2252  }
2253  }
2254 
2255  // FIXME: We no longer need the types from FunctionArgList; lift up and
2256  // simplify.
2257 
2258  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2259  // Flattened function arguments.
2260  SmallVector<llvm::Value *, 16> FnArgs;
2261  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2262  for (auto &Arg : Fn->args()) {
2263  FnArgs.push_back(&Arg);
2264  }
2265  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2266 
2267  // If we're using inalloca, all the memory arguments are GEPs off of the last
2268  // parameter, which is a pointer to the complete memory area.
2269  Address ArgStruct = Address::invalid();
2270  const llvm::StructLayout *ArgStructLayout = nullptr;
2271  if (IRFunctionArgs.hasInallocaArg()) {
2272  ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2273  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2274  FI.getArgStructAlignment());
2275 
2276  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2277  }
2278 
2279  // Name the struct return parameter.
2280  if (IRFunctionArgs.hasSRetArg()) {
2281  auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2282  AI->setName("agg.result");
2283  AI->addAttr(llvm::Attribute::NoAlias);
2284  }
2285 
2286  // Track if we received the parameter as a pointer (indirect, byval, or
2287  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2288  // into a local alloca for us.
2289  SmallVector<ParamValue, 16> ArgVals;
2290  ArgVals.reserve(Args.size());
2291 
2292  // Create a pointer value for every parameter declaration. This usually
2293  // entails copying one or more LLVM IR arguments into an alloca. Don't push
2294  // any cleanups or do anything that might unwind. We do that separately, so
2295  // we can push the cleanups in the correct order for the ABI.
2296  assert(FI.arg_size() == Args.size() &&
2297  "Mismatch between function signature & arguments.");
2298  unsigned ArgNo = 0;
2299  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2300  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2301  i != e; ++i, ++info_it, ++ArgNo) {
2302  const VarDecl *Arg = *i;
2303  const ABIArgInfo &ArgI = info_it->info;
2304 
2305  bool isPromoted =
2306  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2307  // We are converting from ABIArgInfo type to VarDecl type directly, unless
2308  // the parameter is promoted. In this case we convert to
2309  // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2310  QualType Ty = isPromoted ? info_it->type : Arg->getType();
2311  assert(hasScalarEvaluationKind(Ty) ==
2312  hasScalarEvaluationKind(Arg->getType()));
2313 
2314  unsigned FirstIRArg, NumIRArgs;
2315  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2316 
2317  switch (ArgI.getKind()) {
2318  case ABIArgInfo::InAlloca: {
2319  assert(NumIRArgs == 0);
2320  auto FieldIndex = ArgI.getInAllocaFieldIndex();
2321  CharUnits FieldOffset =
2322  CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2323  Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2324  Arg->getName());
2325  ArgVals.push_back(ParamValue::forIndirect(V));
2326  break;
2327  }
2328 
2329  case ABIArgInfo::Indirect: {
2330  assert(NumIRArgs == 1);
2331  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2332 
2333  if (!hasScalarEvaluationKind(Ty)) {
2334  // Aggregates and complex variables are accessed by reference. All we
2335  // need to do is realign the value, if requested.
2336  Address V = ParamAddr;
2337  if (ArgI.getIndirectRealign()) {
2338  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2339 
2340  // Copy from the incoming argument pointer to the temporary with the
2341  // appropriate alignment.
2342  //
2343  // FIXME: We should have a common utility for generating an aggregate
2344  // copy.
2345  CharUnits Size = getContext().getTypeSizeInChars(Ty);
2346  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2347  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2348  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2349  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2350  V = AlignedTemp;
2351  }
2352  ArgVals.push_back(ParamValue::forIndirect(V));
2353  } else {
2354  // Load scalar value from indirect argument.
2355  llvm::Value *V =
2356  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2357 
2358  if (isPromoted)
2359  V = emitArgumentDemotion(*this, Arg, V);
2360  ArgVals.push_back(ParamValue::forDirect(V));
2361  }
2362  break;
2363  }
2364 
2365  case ABIArgInfo::Extend:
2366  case ABIArgInfo::Direct: {
2367 
2368  // If we have the trivial case, handle it with no muss and fuss.
2369  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2370  ArgI.getCoerceToType() == ConvertType(Ty) &&
2371  ArgI.getDirectOffset() == 0) {
2372  assert(NumIRArgs == 1);
2373  llvm::Value *V = FnArgs[FirstIRArg];
2374  auto AI = cast<llvm::Argument>(V);
2375 
2376  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2377  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2378  PVD->getFunctionScopeIndex()) &&
2379  !CGM.getCodeGenOpts().NullPointerIsValid)
2380  AI->addAttr(llvm::Attribute::NonNull);
2381 
2382  QualType OTy = PVD->getOriginalType();
2383  if (const auto *ArrTy =
2384  getContext().getAsConstantArrayType(OTy)) {
2385  // A C99 array parameter declaration with the static keyword also
2386  // indicates dereferenceability, and if the size is constant we can
2387  // use the dereferenceable attribute (which requires the size in
2388  // bytes).
2389  if (ArrTy->getSizeModifier() == ArrayType::Static) {
2390  QualType ETy = ArrTy->getElementType();
2391  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2392  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2393  ArrSize) {
2394  llvm::AttrBuilder Attrs;
2395  Attrs.addDereferenceableAttr(
2396  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2397  AI->addAttrs(Attrs);
2398  } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2399  !CGM.getCodeGenOpts().NullPointerIsValid) {
2400  AI->addAttr(llvm::Attribute::NonNull);
2401  }
2402  }
2403  } else if (const auto *ArrTy =
2404  getContext().getAsVariableArrayType(OTy)) {
2405  // For C99 VLAs with the static keyword, we don't know the size so
2406  // we can't use the dereferenceable attribute, but in addrspace(0)
2407  // we know that it must be nonnull.
2408  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2409  !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2410  !CGM.getCodeGenOpts().NullPointerIsValid)
2411  AI->addAttr(llvm::Attribute::NonNull);
2412  }
2413 
2414  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2415  if (!AVAttr)
2416  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2417  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2418  if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2419  // If alignment-assumption sanitizer is enabled, we do *not* add
2420  // alignment attribute here, but emit normal alignment assumption,
2421  // so that the UBSAN check can still function.
2422  llvm::Value *AlignmentValue =
2423  EmitScalarExpr(AVAttr->getAlignment());
2424  llvm::ConstantInt *AlignmentCI =
2425  cast<llvm::ConstantInt>(AlignmentValue);
2426  unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2427  +llvm::Value::MaximumAlignment);
2428  AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2429  }
2430  }
2431 
2432  if (Arg->getType().isRestrictQualified())
2433  AI->addAttr(llvm::Attribute::NoAlias);
2434 
2435  // LLVM expects swifterror parameters to be used in very restricted
2436  // ways. Copy the value into a less-restricted temporary.
2437  if (FI.getExtParameterInfo(ArgNo).getABI()
2438  == ParameterABI::SwiftErrorResult) {
2439  QualType pointeeTy = Ty->getPointeeType();
2440  assert(pointeeTy->isPointerType());
2441  Address temp =
2442  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2443  Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2444  llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2445  Builder.CreateStore(incomingErrorValue, temp);
2446  V = temp.getPointer();
2447 
2448  // Push a cleanup to copy the value back at the end of the function.
2449  // The convention does not guarantee that the value will be written
2450  // back if the function exits with an unwind exception.
2451  EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2452  }
2453 
2454  // Ensure the argument is the correct type.
2455  if (V->getType() != ArgI.getCoerceToType())
2456  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2457 
2458  if (isPromoted)
2459  V = emitArgumentDemotion(*this, Arg, V);
2460 
2461  // Because of merging of function types from multiple decls it is
2462  // possible for the type of an argument to not match the corresponding
2463  // type in the function type. Since we are codegening the callee
2464  // in here, add a cast to the argument type.
2465  llvm::Type *LTy = ConvertType(Arg->getType());
2466  if (V->getType() != LTy)
2467  V = Builder.CreateBitCast(V, LTy);
2468 
2469  ArgVals.push_back(ParamValue::forDirect(V));
2470  break;
2471  }
2472 
2473  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2474  Arg->getName());
2475 
2476  // Pointer to store into.
2477  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2478 
2479  // Fast-isel and the optimizer generally like scalar values better than
2480  // FCAs, so we flatten them if this is safe to do for this argument.
2481  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2482  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2483  STy->getNumElements() > 1) {
2484  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2485  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2486  llvm::Type *DstTy = Ptr.getElementType();
2487  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2488 
2489  Address AddrToStoreInto = Address::invalid();
2490  if (SrcSize <= DstSize) {
2491  AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2492  } else {
2493  AddrToStoreInto =
2494  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2495  }
2496 
2497  assert(STy->getNumElements() == NumIRArgs);
2498  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2499  auto AI = FnArgs[FirstIRArg + i];
2500  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2501  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2502  Address EltPtr =
2503  Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2504  Builder.CreateStore(AI, EltPtr);
2505  }
2506 
2507  if (SrcSize > DstSize) {
2508  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2509  }
2510 
2511  } else {
2512  // Simple case, just do a coerced store of the argument into the alloca.
2513  assert(NumIRArgs == 1);
2514  auto AI = FnArgs[FirstIRArg];
2515  AI->setName(Arg->getName() + ".coerce");
2516  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2517  }
2518 
2519  // Match to what EmitParmDecl is expecting for this type.
2520  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2521  llvm::Value *V =
2522  EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2523  if (isPromoted)
2524  V = emitArgumentDemotion(*this, Arg, V);
2525  ArgVals.push_back(ParamValue::forDirect(V));
2526  } else {
2527  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2528  }
2529  break;
2530  }
2531 
2532  case ABIArgInfo::CoerceAndExpand: {
2533  // Reconstruct into a temporary.
2534  Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2535  ArgVals.push_back(ParamValue::forIndirect(alloca));
2536 
2537  auto coercionType = ArgI.getCoerceAndExpandType();
2538  alloca = Builder.CreateElementBitCast(alloca, coercionType);
2539  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2540 
2541  unsigned argIndex = FirstIRArg;
2542  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2543  llvm::Type *eltType = coercionType->getElementType(i);
2544  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2545  continue;
2546 
2547  auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2548  auto elt = FnArgs[argIndex++];
2549  Builder.CreateStore(elt, eltAddr);
2550  }
2551  assert(argIndex == FirstIRArg + NumIRArgs);
2552  break;
2553  }
2554 
2555  case ABIArgInfo::Expand: {
2556  // If this structure was expanded into multiple arguments then
2557  // we need to create a temporary and reconstruct it from the
2558  // arguments.
2559  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2560  LValue LV = MakeAddrLValue(Alloca, Ty);
2561  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2562 
2563  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2564  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2565  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2566  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2567  auto AI = FnArgs[FirstIRArg + i];
2568  AI->setName(Arg->getName() + "." + Twine(i));
2569  }
2570  break;
2571  }
2572 
2573  case ABIArgInfo::Ignore:
2574  assert(NumIRArgs == 0);
2575  // Initialize the local variable appropriately.
2576  if (!hasScalarEvaluationKind(Ty)) {
2577  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2578  } else {
2579  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2580  ArgVals.push_back(ParamValue::forDirect(U));
2581  }
2582  break;
2583  }
2584  }
2585 
2586  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2587  for (int I = Args.size() - 1; I >= 0; --I)
2588  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2589  } else {
2590  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2591  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2592  }
2593 }
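// Illustrative end-to-end case (assumed ABI behavior, not from the original
// file): given
//
//   struct P { int x, y; };
//   void use(struct P p);
//
// an ABI that coerces P to { i32, i32 } with getCanBeFlattened() passes two
// i32 IR arguments, named p.coerce0 and p.coerce1 by the Direct case above,
// which stores them field-by-field into a local alloca that then serves as
// the address of `p` for the body.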
2594 
2595 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2596  while (insn->use_empty()) {
2597  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2598  if (!bitcast) return;
2599 
2600  // This is "safe" because we would have used a ConstantExpr otherwise.
2601  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2602  bitcast->eraseFromParent();
2603  }
2604 }
2605 
2606 /// Try to emit a fused autorelease of a return result.
2607 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2608  llvm::Value *result) {
2609  // We must be immediately followed by the cast.
2610  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2611  if (BB->empty()) return nullptr;
2612  if (&BB->back() != result) return nullptr;
2613 
2614  llvm::Type *resultType = result->getType();
2615 
2616  // result is in a BasicBlock and is therefore an Instruction.
2617  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2618 
2619  SmallVector<llvm::Instruction *, 4> InstsToKill;
2620 
2621  // Look for:
2622  // %generator = bitcast %type1* %generator2 to %type2*
2623  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2624  // We would have emitted this as a constant if the operand weren't
2625  // an Instruction.
2626  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2627 
2628  // Require the generator to be immediately followed by the cast.
2629  if (generator->getNextNode() != bitcast)
2630  return nullptr;
2631 
2632  InstsToKill.push_back(bitcast);
2633  }
2634 
2635  // Look for:
2636  // %generator = call i8* @objc_retain(i8* %originalResult)
2637  // or
2638  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2639  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2640  if (!call) return nullptr;
2641 
2642  bool doRetainAutorelease;
2643 
2644  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2645  doRetainAutorelease = true;
2646  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2647  .objc_retainAutoreleasedReturnValue) {
2648  doRetainAutorelease = false;
2649 
2650  // If we emitted an assembly marker for this call (and the
2651  // ARCEntrypoints field should have been set if so), go looking
2652  // for that call. If we can't find it, we can't do this
2653  // optimization. But it should always be the immediately previous
2654  // instruction, unless we needed bitcasts around the call.
2655  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2656  llvm::Instruction *prev = call->getPrevNode();
2657  assert(prev);
2658  if (isa<llvm::BitCastInst>(prev)) {
2659  prev = prev->getPrevNode();
2660  assert(prev);
2661  }
2662  assert(isa<llvm::CallInst>(prev));
2663  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2664  CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2665  InstsToKill.push_back(prev);
2666  }
2667  } else {
2668  return nullptr;
2669  }
2670 
2671  result = call->getArgOperand(0);
2672  InstsToKill.push_back(call);
2673 
2674  // Keep killing bitcasts, for sanity. Note that we no longer care
2675  // about precise ordering as long as there's exactly one use.
2676  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2677  if (!bitcast->hasOneUse()) break;
2678  InstsToKill.push_back(bitcast);
2679  result = bitcast->getOperand(0);
2680  }
2681 
2682  // Delete all the unnecessary instructions, from latest to earliest.
2683  for (auto *I : InstsToKill)
2684  I->eraseFromParent();
2685 
2686  // Do the fused retain/autorelease if we were asked to.
2687  if (doRetainAutorelease)
2688  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2689 
2690  // Cast back to the result type.
2691  return CGF.Builder.CreateBitCast(result, resultType);
2692 }
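// Rough shape of the two patterns recognized above (editorial illustration):
//
//   %v = call i8* @objc_retain(i8* %x)  ; fused into a single
//   ret i8* %v                          ; retainAutoreleaseReturnValue of %x
//
//   %v = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
//   ret i8* %v                          ; retain and pending autorelease
//                                       ; cancel; %x is returned directly
//
// with any interleaved bitcasts (and the ARC marker call, if present) peeled
// off and deleted.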
2693 
2694 /// If this is a +1 of the value of an immutable 'self', remove it.
2695 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2696  llvm::Value *result) {
2697  // This is only applicable to a method with an immutable 'self'.
2698  const ObjCMethodDecl *method =
2699  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2700  if (!method) return nullptr;
2701  const VarDecl *self = method->getSelfDecl();
2702  if (!self->getType().isConstQualified()) return nullptr;
2703 
2704  // Look for a retain call.
2705  llvm::CallInst *retainCall =
2706  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2707  if (!retainCall ||
2708  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2709  return nullptr;
2710 
2711  // Look for an ordinary load of 'self'.
2712  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2713  llvm::LoadInst *load =
2714  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2715  if (!load || load->isAtomic() || load->isVolatile() ||
2716  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2717  return nullptr;
2718 
2719  // Okay! Burn it all down. This relies for correctness on the
2720  // assumption that the retain is emitted as part of the return and
2721  // that thereafter everything is used "linearly".
2722  llvm::Type *resultType = result->getType();
2723  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2724  assert(retainCall->use_empty());
2725  retainCall->eraseFromParent();
2726  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2727 
2728  return CGF.Builder.CreateBitCast(load, resultType);
2729 }
2730 
2731 /// Emit an ARC autorelease of the result of a function.
2732 ///
2733 /// \return the value to actually return from the function
2734 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2735  llvm::Value *result) {
2736  // If we're returning 'self', kill the initial retain. This is a
2737  // heuristic attempt to "encourage correctness" in the really unfortunate
2738  // case where we have a return of self during a dealloc and we desperately
2739  // need to avoid the possible autorelease.
2740  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2741  return self;
2742 
2743  // At -O0, try to emit a fused retain/autorelease.
2744  if (CGF.shouldUseFusedARCCalls())
2745  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2746  return fused;
2747 
2748  return CGF.EmitARCAutoreleaseReturnValue(result);
2749 }
2750 
2751 /// Heuristically search for a dominating store to the return-value slot.
2752 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2753  // Check if a User is a store whose pointer operand is the ReturnValue.
2754  // We are looking for stores to the ReturnValue, not for stores of the
2755  // ReturnValue to some other location.
2756  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2757  auto *SI = dyn_cast<llvm::StoreInst>(U);
2758  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2759  return nullptr;
2760  // These aren't actually possible for non-coerced returns, and we
2761  // only care about non-coerced returns on this code path.
2762  assert(!SI->isAtomic() && !SI->isVolatile());
2763  return SI;
2764  };
2765  // If there are multiple uses of the return-value slot, just check
2766  // for something immediately preceding the IP. Sometimes this can
2767  // happen with how we generate implicit-returns; it can also happen
2768  // with noreturn cleanups.
2769  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2770  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2771  if (IP->empty()) return nullptr;
2772  llvm::Instruction *I = &IP->back();
2773 
2774  // Skip lifetime markers
2775  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2776  IE = IP->rend();
2777  II != IE; ++II) {
2778  if (llvm::IntrinsicInst *Intrinsic =
2779  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2780  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2781  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2782  ++II;
2783  if (II == IE)
2784  break;
2785  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2786  continue;
2787  }
2788  }
2789  I = &*II;
2790  break;
2791  }
2792 
2793  return GetStoreIfValid(I);
2794  }
2795 
2796  llvm::StoreInst *store =
2797  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2798  if (!store) return nullptr;
2799 
2800  // Now do a quick-and-dirty dominance check: just walk up the
2801  // single-predecessors chain from the current insertion point.
2802  llvm::BasicBlock *StoreBB = store->getParent();
2803  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2804  while (IP != StoreBB) {
2805  if (!(IP = IP->getSinglePredecessor()))
2806  return nullptr;
2807  }
2808 
2809  // Okay, the store's basic block dominates the insertion point; we
2810  // can do our thing.
2811  return store;
2812 }
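// Example of the elision this enables (editorial sketch): for
//
//   int f() { return 42; }
//
// the body emits `store i32 42, i32* %retval` and the epilogue would normally
// reload it; finding the dominating store lets EmitFunctionEpilog return the
// stored value directly, erase the store, and usually erase %retval as well.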
2813 
2814 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2815  bool EmitRetDbgLoc,
2816  SourceLocation EndLoc) {
2817  if (FI.isNoReturn()) {
2818  // Noreturn functions don't return.
2819  EmitUnreachable(EndLoc);
2820  return;
2821  }
2822 
2823  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2824  // Naked functions don't have epilogues.
2825  Builder.CreateUnreachable();
2826  return;
2827  }
2828 
2829  // Functions with no result always return void.
2830  if (!ReturnValue.isValid()) {
2831  Builder.CreateRetVoid();
2832  return;
2833  }
2834 
2835  llvm::DebugLoc RetDbgLoc;
2836  llvm::Value *RV = nullptr;
2837  QualType RetTy = FI.getReturnType();
2838  const ABIArgInfo &RetAI = FI.getReturnInfo();
2839 
2840  switch (RetAI.getKind()) {
2841  case ABIArgInfo::InAlloca:
2842  // Aggregates get evaluated directly into the destination. Sometimes we
2843  // need to return the sret value in a register, though.
2844  assert(hasAggregateEvaluationKind(RetTy));
2845  if (RetAI.getInAllocaSRet()) {
2846  llvm::Function::arg_iterator EI = CurFn->arg_end();
2847  --EI;
2848  llvm::Value *ArgStruct = &*EI;
2849  llvm::Value *SRet = Builder.CreateStructGEP(
2850  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2851  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2852  }
2853  break;
2854 
2855  case ABIArgInfo::Indirect: {
2856  auto AI = CurFn->arg_begin();
2857  if (RetAI.isSRetAfterThis())
2858  ++AI;
2859  switch (getEvaluationKind(RetTy)) {
2860  case TEK_Complex: {
2861  ComplexPairTy RT =
2862  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2863  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2864  /*isInit*/ true);
2865  break;
2866  }
2867  case TEK_Aggregate:
2868  // Do nothing; aggregates get evaluated directly into the destination.
2869  break;
2870  case TEK_Scalar:
2871  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2872  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2873  /*isInit*/ true);
2874  break;
2875  }
2876  break;
2877  }
2878 
2879  case ABIArgInfo::Extend:
2880  case ABIArgInfo::Direct:
2881  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2882  RetAI.getDirectOffset() == 0) {
2883  // The internal return value temp will always have pointer-to-return-type
2884  // type; just do a load.
2885 
2886  // If there is a dominating store to ReturnValue, we can elide
2887  // the load, zap the store, and usually zap the alloca.
2888  if (llvm::StoreInst *SI =
2889  findDominatingStoreToReturnValue(*this)) {
2890  // Reuse the debug location from the store unless there is
2891  // cleanup code to be emitted between the store and return
2892  // instruction.
2893  if (EmitRetDbgLoc && !AutoreleaseResult)
2894  RetDbgLoc = SI->getDebugLoc();
2895  // Get the stored value and nuke the now-dead store.
2896  RV = SI->getValueOperand();
2897  SI->eraseFromParent();
2898 
2899  // If that was the only use of the return value, nuke it as well now.
2900  auto returnValueInst = ReturnValue.getPointer();
2901  if (returnValueInst->use_empty()) {
2902  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2903  alloca->eraseFromParent();
2904  ReturnValue = Address::invalid();
2905  }
2906  }
2907 
2908  // Otherwise, we have to do a simple load.
2909  } else {
2910  RV = Builder.CreateLoad(ReturnValue);
2911  }
2912  } else {
2913  // If the value is offset in memory, apply the offset now.
2914  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2915 
2916  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2917  }
2918 
2919  // In ARC, end functions that return a retainable type with a call
2920  // to objc_autoreleaseReturnValue.
2921  if (AutoreleaseResult) {
2922 #ifndef NDEBUG
2923  // Type::isObjCRetainableType has to be called on a QualType that hasn't
2924  // been stripped of the typedefs, so we cannot use RetTy here. Get the
2925  // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2926  // CurCodeDecl or BlockInfo.
2927  QualType RT;
2928 
2929  if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2930  RT = FD->getReturnType();
2931  else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2932  RT = MD->getReturnType();
2933  else if (isa<BlockDecl>(CurCodeDecl))
2934  RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2935  else
2936  llvm_unreachable("Unexpected function/method type");
2937 
2938  assert(getLangOpts().ObjCAutoRefCount &&
2939  !FI.isReturnsRetained() &&
2940  RT->isObjCRetainableType());
2941 #endif
2942  RV = emitAutoreleaseOfResult(*this, RV);
2943  }
2944 
2945  break;
2946 
2947  case ABIArgInfo::Ignore:
2948  break;
2949 
2950  case ABIArgInfo::CoerceAndExpand: {
2951  auto coercionType = RetAI.getCoerceAndExpandType();
2952  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2953 
2954  // Load all of the coerced elements out into results.
2955  llvm::SmallVector<llvm::Value*, 4> results;
2956  Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2957  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2958  auto coercedEltType = coercionType->getElementType(i);
2959  if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2960  continue;
2961 
2962  auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2963  auto elt = Builder.CreateLoad(eltAddr);
2964  results.push_back(elt);
2965  }
2966 
2967  // If we have one result, it's the single direct result type.
2968  if (results.size() == 1) {
2969  RV = results[0];
2970 
2971  // Otherwise, we need to make a first-class aggregate.
2972  } else {
2973  // Construct a return type that lacks padding elements.
2974  llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2975 
2976  RV = llvm::UndefValue::get(returnType);
2977  for (unsigned i = 0, e = results.size(); i != e; ++i) {
2978  RV = Builder.CreateInsertValue(RV, results[i], i);
2979  }
2980  }
2981  break;
2982  }
2983 
2984  case ABIArgInfo::Expand:
2985  llvm_unreachable("Invalid ABI kind for return argument");
2986  }
2987 
2988  llvm::Instruction *Ret;
2989  if (RV) {
2990  EmitReturnValueCheck(RV);
2991  Ret = Builder.CreateRet(RV);
2992  } else {
2993  Ret = Builder.CreateRetVoid();
2994  }
2995 
2996  if (RetDbgLoc)
2997  Ret->setDebugLoc(std::move(RetDbgLoc));
2998 }
2999 
3000 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3001  // A current decl may not be available when emitting vtable thunks.
3002  if (!CurCodeDecl)
3003  return;
3004 
3005  ReturnsNonNullAttr *RetNNAttr = nullptr;
3006  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3007  RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3008 
3009  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3010  return;
3011 
3012  // Prefer the returns_nonnull attribute if it's present.
3013  SourceLocation AttrLoc;
3014  SanitizerMask CheckKind;
3015  SanitizerHandler Handler;
3016  if (RetNNAttr) {
3017  assert(!requiresReturnValueNullabilityCheck() &&
3018  "Cannot check nullability and the nonnull attribute");
3019  AttrLoc = RetNNAttr->getLocation();
3020  CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3021  Handler = SanitizerHandler::NonnullReturn;
3022  } else {
3023  if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3024  if (auto *TSI = DD->getTypeSourceInfo())
3025  if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3026  AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3027  CheckKind = SanitizerKind::NullabilityReturn;
3028  Handler = SanitizerHandler::NullabilityReturn;
3029  }
3030 
3031  SanitizerScope SanScope(this);
3032 
3033  // Make sure the "return" source location is valid. If we're checking a
3034  // nullability annotation, make sure the preconditions for the check are met.
3035  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3036  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3037  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3038  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3039  if (requiresReturnValueNullabilityCheck())
3040  CanNullCheck =
3041  Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3042  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3043  EmitBlock(Check);
3044 
3045  // Now do the null check.
3046  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3047  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3048  llvm::Value *DynamicData[] = {SLocPtr};
3049  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3050 
3051  EmitBlock(NoCheck);
3052 
3053 #ifndef NDEBUG
3054  // The return location should not be used after the check has been emitted.
3055  ReturnLocation = Address::invalid();
3056 #endif
3057 }
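// Illustrative trigger (assumption, not from the original file): under
// -fsanitize=returns-nonnull-attribute, a function like
//
//   __attribute__((returns_nonnull)) char *get(void) { return 0; }
//
// takes the RetNNAttr path above, and the emitted check fires at run time
// whenever the returned pointer is null and the return location is valid.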
3058 
3059 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3060  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3061  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3062 }
3063 
3064 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3065  QualType Ty) {
3066  // FIXME: Generate IR in one pass, rather than going back and fixing up these
3067  // placeholders.
3068  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3069  llvm::Type *IRPtrTy = IRTy->getPointerTo();
3070  llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3071 
3072  // FIXME: When we generate this IR in one pass, we shouldn't need
3073  // this win32-specific alignment hack.
3074  CharUnits Align = CharUnits::fromQuantity(4);
3075  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3076 
3077  return AggValueSlot::forAddr(Address(Placeholder, Align),
3078  Ty.getQualifiers(),
3079  AggValueSlot::IsNotDestructed,
3080  AggValueSlot::DoesNotNeedGCBarriers,
3081  AggValueSlot::IsNotAliased,
3082  AggValueSlot::DoesNotOverlap);
3083 }
3084 
3085 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3086  const VarDecl *param,
3087  SourceLocation loc) {
3088  // StartFunction converted the ABI-lowered parameter(s) into a
3089  // local alloca. We need to turn that into an r-value suitable
3090  // for EmitCall.
3091  Address local = GetAddrOfLocalVar(param);
3092 
3093  QualType type = param->getType();
3094 
3095  if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3096  CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3097  }
3098 
3099  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3100  // but the argument needs to be the original pointer.
3101  if (type->isReferenceType()) {
3102  args.add(RValue::get(Builder.CreateLoad(local)), type);
3103 
3104  // In ARC, move out of consumed arguments so that the release cleanup
3105  // entered by StartFunction doesn't cause an over-release. This isn't
3106  // optimal -O0 code generation, but it should get cleaned up when
3107  // optimization is enabled. This also assumes that delegate calls are
3108  // performed exactly once for a set of arguments, but that should be safe.
3109  } else if (getLangOpts().ObjCAutoRefCount &&
3110  param->hasAttr<NSConsumedAttr>() &&
3111  type->isObjCRetainableType()) {
3112  llvm::Value *ptr = Builder.CreateLoad(local);
3113  auto null =
3114  llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3115  Builder.CreateStore(null, local);
3116  args.add(RValue::get(ptr), type);
3117 
3118  // For the most part, we just need to load the alloca, except that
3119  // aggregate r-values are actually pointers to temporaries.
3120  } else {
3121  args.add(convertTempToRValue(local, type, loc), type);
3122  }
3123 
3124  // Deactivate the cleanup for the callee-destructed param that was pushed.
3125  if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3126  type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3127  type.isDestructedType()) {
3128  EHScopeStack::stable_iterator cleanup =
3129  CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3130  assert(cleanup.isValid() &&
3131  "cleanup for callee-destructed param not recorded");
3132  // This unreachable is a temporary marker which will be removed later.
3133  llvm::Instruction *isActive = Builder.CreateUnreachable();
3134  args.addArgCleanupDeactivation(cleanup, isActive);
3135  }
3136 }
3137 
3138 static bool isProvablyNull(llvm::Value *addr) {
3139  return isa<llvm::ConstantPointerNull>(addr);
3140 }
3141 
3142 /// Emit the actual writing-back of a writeback.
3144  const CallArgList::Writeback &writeback) {
3145  const LValue &srcLV = writeback.Source;
3146  Address srcAddr = srcLV.getAddress();
3147  assert(!isProvablyNull(srcAddr.getPointer()) &&
3148  "shouldn't have writeback for provably null argument");
3149 
3150  llvm::BasicBlock *contBB = nullptr;
3151 
3152  // If the argument wasn't provably non-null, we need to null check
3153  // before doing the store.
3154  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3155  CGF.CGM.getDataLayout());
3156  if (!provablyNonNull) {
3157  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3158  contBB = CGF.createBasicBlock("icr.done");
3159 
3160  llvm::Value *isNull =
3161  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3162  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3163  CGF.EmitBlock(writebackBB);
3164  }
3165 
3166  // Load the value to writeback.
3167  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3168 
3169  // Cast it back, in case we're writing an id to a Foo* or something.
3170  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3171  "icr.writeback-cast");
3172 
3173  // Perform the writeback.
3174 
3175  // If we have a "to use" value, it's something we need to emit a use
3176  // of. This has to be carefully threaded in: if it's done after the
3177  // release it's potentially undefined behavior (and the optimizer
3178  // will ignore it), and if it happens before the retain then the
3179  // optimizer could move the release there.
3180  if (writeback.ToUse) {
3181  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3182 
3183  // Retain the new value. No need to block-copy here: the block's
3184  // being passed up the stack.
3185  value = CGF.EmitARCRetainNonBlock(value);
3186 
3187  // Emit the intrinsic use here.
3188  CGF.EmitARCIntrinsicUse(writeback.ToUse);
3189 
3190  // Load the old value (primitively).
3191  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3192 
3193  // Put the new value in place (primitively).
3194  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3195 
3196  // Release the old value.
3197  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3198 
3199  // Otherwise, we can just do a normal lvalue store.
3200  } else {
3201  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3202  }
3203 
3204  // Jump to the continuation block.
3205  if (!provablyNonNull)
3206  CGF.EmitBlock(contBB);
3207 }
3208 
3209 static void emitWritebacks(CodeGenFunction &CGF,
3210  const CallArgList &args) {
3211  for (const auto &I : args.writebacks())
3212  emitWriteback(CGF, I);
3213 }
3214 
3215 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3216  const CallArgList &CallArgs) {
3217  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3218  CallArgs.getCleanupsToDeactivate();
3219  // Iterate in reverse to increase the likelihood of popping the cleanup.
3220  for (const auto &I : llvm::reverse(Cleanups)) {
3221  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3222  I.IsActiveIP->eraseFromParent();
3223  }
3224 }
3225 
3226 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3227  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3228  if (uop->getOpcode() == UO_AddrOf)
3229  return uop->getSubExpr();
3230  return nullptr;
3231 }
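// For example, given the argument expression `&x` (modulo parentheses), the
// helper above returns the sub-expression `x`; for anything else it returns
// null and the caller emits the pointer value directly.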
3232 
3233 /// Emit an argument that's being passed call-by-writeback. That is,
3234 /// we are passing the address of an __autoreleased temporary; it
3235 /// might be copy-initialized with the current value of the given
3236 /// address, but it will definitely be copied out of after the call.
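/// For example (illustrative): in ARC, a call `f(&err)` where `err` is a
/// __strong local and the parameter is `NSError * __autoreleasing *` is
/// compiled roughly as `tmp = err; f(&tmp); err = tmp;`, with the initial
/// copy-in controlled by shouldCopy() below.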
3237 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3238  const ObjCIndirectCopyRestoreExpr *CRE) {
3239  LValue srcLV;
3240 
3241  // Make an optimistic effort to emit the address as an l-value.
3242  // This can fail if the argument expression is more complicated.
3243  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3244  srcLV = CGF.EmitLValue(lvExpr);
3245 
3246  // Otherwise, just emit it as a scalar.
3247  } else {
3248  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3249 
3250  QualType srcAddrType =
3251  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3252  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3253  }
3254  Address srcAddr = srcLV.getAddress();
3255 
3256  // The dest and src types don't necessarily match in LLVM terms
3257  // because of the crazy ObjC compatibility rules.
3258 
3259  llvm::PointerType *destType =
3260  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3261 
3262  // If the address is a constant null, just pass the appropriate null.
3263  if (isProvablyNull(srcAddr.getPointer())) {
3264  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3265  CRE->getType());
3266  return;
3267  }
3268 
3269  // Create the temporary.
3270  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3271  CGF.getPointerAlign(),
3272  "icr.temp");
3273  // Loading an l-value can introduce a cleanup if the l-value is __weak,
3274  // and that cleanup will be conditional if we can't prove that the l-value
3275  // isn't null, so we need to register a dominating point so that the cleanups
3276  // system will make valid IR.
3277  CodeGenFunction::ConditionalEvaluation condEval(CGF);
3278 
3279  // Zero-initialize it if we're not doing a copy-initialization.
3280  bool shouldCopy = CRE->shouldCopy();
3281  if (!shouldCopy) {
3282  llvm::Value *null =
3283  llvm::ConstantPointerNull::get(
3284  cast<llvm::PointerType>(destType->getElementType()));
3285  CGF.Builder.CreateStore(null, temp);
3286  }
3287 
3288  llvm::BasicBlock *contBB = nullptr;
3289  llvm::BasicBlock *originBB = nullptr;
3290 
3291  // If the address is *not* known to be non-null, we need to switch.
3292  llvm::Value *finalArgument;
3293 
3294  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3295  CGF.CGM.getDataLayout());
3296  if (provablyNonNull) {
3297  finalArgument = temp.getPointer();
3298  } else {
3299  llvm::Value *isNull =
3300  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3301 
3302  finalArgument = CGF.Builder.CreateSelect(isNull,
3303  llvm::ConstantPointerNull::get(destType),
3304  temp.getPointer(), "icr.argument");
3305 
3306  // If we need to copy, then the load has to be conditional, which
3307  // means we need control flow.
3308  if (shouldCopy) {
3309  originBB = CGF.Builder.GetInsertBlock();
3310  contBB = CGF.createBasicBlock("icr.cont");
3311  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3312  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3313  CGF.EmitBlock(copyBB);
3314  condEval.begin(CGF);
3315  }
3316  }
3317 
3318  llvm::Value *valueToUse = nullptr;
3319 
3320  // Perform a copy if necessary.
3321  if (shouldCopy) {
3322  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3323  assert(srcRV.isScalar());
3324 
3325  llvm::Value *src = srcRV.getScalarVal();
3326  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3327  "icr.cast");
3328 
3329  // Use an ordinary store, not a store-to-lvalue.
3330  CGF.Builder.CreateStore(src, temp);
3331 
3332  // If optimization is enabled, and the value was held in a
3333  // __strong variable, we need to tell the optimizer that this
3334  // value has to stay alive until we're doing the store back.
3335  // This is because the temporary is effectively unretained,
3336  // and so otherwise we can violate the high-level semantics.
3337  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3338  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3339  valueToUse = src;
3340  }
3341  }
3342 
3343  // Finish the control flow if we needed it.
3344  if (shouldCopy && !provablyNonNull) {
3345  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3346  CGF.EmitBlock(contBB);
3347 
3348  // Make a phi for the value to intrinsically use.
3349  if (valueToUse) {
3350  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3351  "icr.to-use");
3352  phiToUse->addIncoming(valueToUse, copyBB);
3353  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3354  originBB);
3355  valueToUse = phiToUse;
3356  }
3357 
3358  condEval.end(CGF);
3359  }
3360 
3361  args.addWriteback(srcLV, temp, valueToUse);
3362  args.add(RValue::get(finalArgument), CRE->getType());
3363 }
3364 
3365 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3366  assert(!StackBase);
3367 
3368  // Save the stack.
3369  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3370  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3371 }
3372 
3373 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3374  if (StackBase) {
3375  // Restore the stack after the call.
3376  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3377  CGF.Builder.CreateCall(F, StackBase);
3378  }
3379 }
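// Taken together, allocateArgumentMemory()/freeArgumentMemory() bracket the
// inalloca argument area in the generated IR roughly like this (illustrative):
//
//   %inalloca.save = call i8* @llvm.stacksave()
//   ; ...argument packing and the call itself...
//   call void @llvm.stackrestore(i8* %inalloca.save)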
3380 
3381 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3382  SourceLocation ArgLoc,
3383  AbstractCallee AC,
3384  unsigned ParmNum) {
3385  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3386  SanOpts.has(SanitizerKind::NullabilityArg)))
3387  return;
3388 
3389  // The param decl may be missing in a variadic function.
3390  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3391  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3392 
3393  // Prefer the nonnull attribute if it's present.
3394  const NonNullAttr *NNAttr = nullptr;
3395  if (SanOpts.has(SanitizerKind::NonnullAttribute))
3396  NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3397 
3398  bool CanCheckNullability = false;
3399  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3400  auto Nullability = PVD->getType()->getNullability(getContext());
3401  CanCheckNullability = Nullability &&
3402  *Nullability == NullabilityKind::NonNull &&
3403  PVD->getTypeSourceInfo();
3404  }
3405 
3406  if (!NNAttr && !CanCheckNullability)
3407  return;
3408 
3409  SourceLocation AttrLoc;
3410  SanitizerMask CheckKind;
3411  SanitizerHandler Handler;
3412  if (NNAttr) {
3413  AttrLoc = NNAttr->getLocation();
3414  CheckKind = SanitizerKind::NonnullAttribute;
3415  Handler = SanitizerHandler::NonnullArg;
3416  } else {
3417  AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3418  CheckKind = SanitizerKind::NullabilityArg;
3419  Handler = SanitizerHandler::NullabilityArg;
3420  }
3421 
3422  SanitizerScope SanScope(this);
3423  assert(RV.isScalar());
3424  llvm::Value *V = RV.getScalarVal();
3425  llvm::Value *Cond =
3426  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3427  llvm::Constant *StaticData[] = {
3428  EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3429  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3430  };
3431  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3432 }
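// Illustrative sketch (not part of the original file): a call site that the
// check above instruments. Under -fsanitize=nonnull-attribute, the argument
// is compared against null before the call, branching to the nonnull_arg
// handler on failure.
#if 0
__attribute__((nonnull(1))) void consume(int *p);

void caller(int *maybe_null) {
  consume(maybe_null);  // guarded by the icmp/branch emitted above
}
#endif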
3433 
3434 void CodeGenFunction::EmitCallArgs(
3435  CallArgList &Args, ArrayRef<QualType> ArgTypes,
3436  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3437  AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3438  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3439 
3440  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3441  // because arguments are destroyed left to right in the callee. As a special
3442  // case, there are certain language constructs that require left-to-right
3443  // evaluation, and in those cases we consider the evaluation order requirement
3444  // to trump the "destruction order is reverse construction order" guarantee.
3445  bool LeftToRight =
3446  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3447  ? Order == EvaluationOrder::ForceLeftToRight
3448  : Order != EvaluationOrder::ForceRightToLeft;
3449 
3450  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3451  RValue EmittedArg) {
3452  if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3453  return;
3454  auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3455  if (PS == nullptr)
3456  return;
3457 
3458  const auto &Context = getContext();
3459  auto SizeTy = Context.getSizeType();
3460  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3461  assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3462  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3463  EmittedArg.getScalarVal());
3464  Args.add(RValue::get(V), SizeTy);
3465  // If we're emitting args in reverse, be sure to do so with
3466  // pass_object_size, as well.
3467  if (!LeftToRight)
3468  std::swap(Args.back(), *(&Args.back() - 1));
3469  };
3470 
3471  // Insert a stack save if we're going to need any inalloca args.
3472  bool HasInAllocaArgs = false;
3473  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3474  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3475  I != E && !HasInAllocaArgs; ++I)
3476  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3477  if (HasInAllocaArgs) {
3478  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3479  Args.allocateArgumentMemory(*this);
3480  }
3481  }
3482 
3483  // Evaluate each argument in the appropriate order.
3484  size_t CallArgsStart = Args.size();
3485  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3486  unsigned Idx = LeftToRight ? I : E - I - 1;
3487  CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3488  unsigned InitialArgSize = Args.size();
3489  // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3490  // the argument and parameter match or the objc method is parameterized.
3491  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3492  getContext().hasSameUnqualifiedType((*Arg)->getType(),
3493  ArgTypes[Idx]) ||
3494  (isa<ObjCMethodDecl>(AC.getDecl()) &&
3495  isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3496  "Argument and parameter types don't match");
3497  EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3498  // In particular, we depend on it being the last arg in Args, and the
3499  // objectsize bits depend on there only being one arg if !LeftToRight.
3500  assert(InitialArgSize + 1 == Args.size() &&
3501  "The code below depends on only adding one arg per EmitCallArg");
3502  (void)InitialArgSize;
3503  // Since pointer arguments are never emitted as LValues, it is safe to emit
3504  // the non-null argument check for r-values only.
3505  if (!Args.back().hasLValue()) {
3506  RValue RVArg = Args.back().getKnownRValue();
3507  EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3508  ParamsToSkip + Idx);
3509  // @llvm.objectsize should never have side-effects and shouldn't need
3510  // destruction/cleanups, so we can safely "emit" it after its arg,
3511  // regardless of right-to-leftness
3512  MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3513  }
3514  }
3515 
3516  if (!LeftToRight) {
3517  // Un-reverse the arguments we just evaluated so they match up with the LLVM
3518  // IR function.
3519  std::reverse(Args.begin() + CallArgsStart, Args.end());
3520  }
3521 }
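// Illustrative sketch (not part of the original file) of the evaluation-order
// constraint described above. Under the MS C++ ABI the callee destroys its
// parameters left to right, so the caller evaluates them right to left to
// keep destruction order the reverse of construction order.
#if 0
#include <cstdio>

struct Noisy {
  int id;
  explicit Noisy(int i) : id(i) { std::printf("construct %d\n", id); }
  Noisy(const Noisy &o) : id(o.id) { std::printf("copy %d\n", id); }
  ~Noisy() { std::printf("destroy %d\n", id); }
};

void take(Noisy a, Noisy b);

void demo() { take(Noisy(1), Noisy(2)); }  // MS ABI: Noisy(2) is built first
#endif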
3522 
3523 namespace {
3524 
3525 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3526  DestroyUnpassedArg(Address Addr, QualType Ty)
3527  : Addr(Addr), Ty(Ty) {}
3528 
3529  Address Addr;
3530  QualType Ty;
3531 
3532  void Emit(CodeGenFunction &CGF, Flags flags) override {
3533  QualType::DestructionKind DtorKind = Ty.isDestructedType();
3534  if (DtorKind == QualType::DK_cxx_destructor) {
3535  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3536  assert(!Dtor->isTrivial());
3537  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3538  /*Delegating=*/false, Addr);
3539  } else {
3540  CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3541  }
3542  }
3543 };
3544 
3545 struct DisableDebugLocationUpdates {
3546  CodeGenFunction &CGF;
3547  bool disabledDebugInfo;
3548  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3549  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3550  CGF.disableDebugInfo();
3551  }
3552  ~DisableDebugLocationUpdates() {
3553  if (disabledDebugInfo)
3554  CGF.enableDebugInfo();
3555  }
3556 };
3557 
3558 } // end anonymous namespace
3559 
3560 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3561  if (!HasLV)
3562  return RV;
3563  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3564  CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3565  LV.isVolatile());
3566  IsUsed = true;
3567  return RValue::getAggregate(Copy.getAddress());
3568 }
3569 
3570 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3571  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3572  if (!HasLV && RV.isScalar())
3573  CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3574  else if (!HasLV && RV.isComplex())
3575  CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3576  else {
3577  auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3578  LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3579  // We assume that call args are never copied into subobjects.
3580  CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3581  HasLV ? LV.isVolatileQualified()
3582  : RV.isVolatileQualified());
3583  }
3584  IsUsed = true;
3585 }
3586 
3587 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3588  QualType type) {
3589  DisableDebugLocationUpdates Dis(*this, E);
3590  if (const ObjCIndirectCopyRestoreExpr *CRE
3591  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3592  assert(getLangOpts().ObjCAutoRefCount);
3593  return emitWritebackArg(*this, args, CRE);
3594  }
3595 
3596  assert(type->isReferenceType() == E->isGLValue() &&
3597  "reference binding to unmaterialized r-value!");
3598 
3599  if (E->isGLValue()) {
3600  assert(E->getObjectKind() == OK_Ordinary);
3601  return args.add(EmitReferenceBindingToExpr(E), type);
3602  }
3603 
3604  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3605 
3606  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3607  // However, we still have to push an EH-only cleanup in case we unwind before
3608  // we make it to the call.
3609  if (HasAggregateEvalKind &&
3610  getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3611  // If we're using inalloca, use the argument memory. Otherwise, use a
3612  // temporary.
3613  AggValueSlot Slot;
3614  if (args.isUsingInAlloca())
3615  Slot = createPlaceholderSlot(*this, type);
3616  else
3617  Slot = CreateAggTemp(type, "agg.tmp");
3618 
3619  bool DestroyedInCallee = true, NeedsEHCleanup = true;
3620  if (const auto *RD = type->getAsCXXRecordDecl())
3621  DestroyedInCallee = RD->hasNonTrivialDestructor();
3622  else
3623  NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3624 
3625  if (DestroyedInCallee)
3626  Slot.setExternallyDestructed();
3627 
3628  EmitAggExpr(E, Slot);
3629  RValue RV = Slot.asRValue();
3630  args.add(RV, type);
3631 
3632  if (DestroyedInCallee && NeedsEHCleanup) {
3633  // Create a no-op GEP between the placeholder and the cleanup so we can
3634  // RAUW it successfully. It also serves as a marker of the first
3635  // instruction where the cleanup is active.
3636  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3637  type);
3638  // This unreachable is a temporary marker which will be removed later.
3639  llvm::Instruction *IsActive = Builder.CreateUnreachable();
3640  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3641  }
3642  return;
3643  }
3644 
3645  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3646  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3647  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3648  assert(L.isSimple());
3649  args.addUncopiedAggregate(L, type);
3650  return;
3651  }
3652 
3653  args.add(EmitAnyExprToTemp(E), type);
3654 }
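// Illustrative sketch (not part of the original file): why the EH-only
// cleanup above is needed under the MS C++ ABI. The callee destroys `s` on a
// normal return, but if a later-evaluated argument throws before the call is
// made, the caller must destroy the already-constructed temporary itself.
#if 0
struct S { S(); ~S(); };
int mayThrow();

void take(int x, S s);

// Arguments are evaluated right to left here, so S() is constructed before
// mayThrow() runs; if mayThrow() throws, the caller's EH cleanup destroys s.
void demo() { take(mayThrow(), S()); }
#endif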
3655 
3656 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3657  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3658  // implicitly widens null pointer constants that are arguments to varargs
3659  // functions to pointer-sized ints.
3660  if (!getTarget().getTriple().isOSWindows())
3661  return Arg->getType();
3662 
3663  if (Arg->getType()->isIntegerType() &&
3664  getContext().getTypeSize(Arg->getType()) <
3665  getContext().getTargetInfo().getPointerWidth(0) &&
3666  Arg->isNullPointerConstant(getContext(),
3667  Expr::NPC_ValueDependentIsNotNull)) {
3668  return getContext().getIntPtrType();
3669  }
3670 
3671  return Arg->getType();
3672 }
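// Illustrative sketch (not part of the original file) of the Win64 case
// handled above: MSVC headers define NULL as plain 0 (a 32-bit int), yet a
// varargs callee reading a %p slot expects a pointer-sized value, so the
// argument type is widened to intptr_t.
#if 0
#include <cstdio>
#include <cstddef>

void demo(const char *name) {
  std::printf("%s at %p\n", name, NULL);  // NULL is 0 on MSVC; widened here
}
#endif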
3673 
3674 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3675 // optimizer it can aggressively ignore unwind edges.
3676 void
3677 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3678  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3679  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3680  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3681  CGM.getNoObjCARCExceptionsMetadata());
3682 }
3683 
3684 /// Emits a call to the given no-arguments nounwind runtime function.
3685 llvm::CallInst *
3686 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3687  const llvm::Twine &name) {
3688  return EmitNounwindRuntimeCall(callee, None, name);
3689 }
3690 
3691 /// Emits a call to the given nounwind runtime function.
3692 llvm::CallInst *
3693 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3694  ArrayRef<llvm::Value *> args,
3695  const llvm::Twine &name) {
3696  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3697  call->setDoesNotThrow();
3698  return call;
3699 }
3700 
3701 /// Emits a simple call (never an invoke) to the given no-arguments
3702 /// runtime function.
3703 llvm::CallInst *
3704 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3705  const llvm::Twine &name) {
3706  return EmitRuntimeCall(callee, None, name);
3707 }
3708 
3709 // Calls which may throw must have operand bundles indicating which funclet
3710 // they are nested within.
3711 SmallVector<llvm::OperandBundleDef, 1>
3712 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3713  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3714  // There is no need for a funclet operand bundle if we aren't inside a
3715  // funclet.
3716  if (!CurrentFuncletPad)
3717  return BundleList;
3718 
3719  // Skip intrinsics which cannot throw.
3720  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3721  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3722  return BundleList;
3723 
3724  BundleList.emplace_back("funclet", CurrentFuncletPad);
3725  return BundleList;
3726 }
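// Minimal sketch (illustrative; the helper name is invented) of attaching the
// bundle produced above at a call site through the LLVM C++ API:
#if 0
static llvm::CallInst *callInsideFunclet(llvm::IRBuilder<> &Builder,
                                         llvm::Value *Callee,
                                         llvm::ArrayRef<llvm::Value *> Args,
                                         llvm::Value *FuncletPad) {
  llvm::SmallVector<llvm::OperandBundleDef, 1> Bundles;
  if (FuncletPad)  // only calls nested inside a funclet need the bundle
    Bundles.emplace_back("funclet", FuncletPad);
  return Builder.CreateCall(Callee, Args, Bundles);
}
#endif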
3727 
3728 /// Emits a simple call (never an invoke) to the given runtime function.
3729 llvm::CallInst *
3730 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3731  ArrayRef<llvm::Value *> args,
3732  const llvm::Twine &name) {
3733  llvm::CallInst *call =
3734  Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
3735  call->setCallingConv(getRuntimeCC());
3736  return call;
3737 }
3738 
3739 /// Emits a call or invoke to the given noreturn runtime function.
3740 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3741  ArrayRef<llvm::Value*> args) {
3742  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3743  getBundlesForFunclet(callee);
3744 
3745  if (getInvokeDest()) {
3746  llvm::InvokeInst *invoke =
3747  Builder.CreateInvoke(callee,
3748  getUnreachableBlock(),
3749  getInvokeDest(),
3750  args,
3751  BundleList);
3752  invoke->setDoesNotReturn();
3753  invoke->setCallingConv(getRuntimeCC());
3754  } else {
3755  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3756  call->setDoesNotReturn();
3757  call->setCallingConv(getRuntimeCC());
3758  Builder.CreateUnreachable();
3759  }
3760 }
3761 
3762 /// Emits a call or invoke instruction to the given nullary runtime function.
3763 llvm::CallSite
3764 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3765  const Twine &name) {
3766  return EmitRuntimeCallOrInvoke(callee, None, name);
3767 }
3768 
3769 /// Emits a call or invoke instruction to the given runtime function.
3770 llvm::CallSite
3771 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3772  ArrayRef<llvm::Value *> args,
3773  const Twine &name) {
3774  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3775  callSite.setCallingConv(getRuntimeCC());
3776  return callSite;
3777 }
3778 
3779 /// Emits a call or invoke instruction to the given function, depending
3780 /// on the current state of the EH stack.
3781 llvm::CallSite
3782 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3783  ArrayRef<llvm::Value *> Args,
3784  const Twine &Name) {
3785  llvm::BasicBlock *InvokeDest = getInvokeDest();
3786  SmallVector<llvm::OperandBundleDef, 1> BundleList =
3787  getBundlesForFunclet(Callee);
3788 
3789  llvm::Instruction *Inst;
3790  if (!InvokeDest)
3791  Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3792  else {
3793  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3794  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3795  Name);
3796  EmitBlock(ContBB);
3797  }
3798 
3799  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3800  // optimizer it can aggressively ignore unwind edges.
3801  if (CGM.getLangOpts().ObjCAutoRefCount)
3802  AddObjCARCExceptionMetadata(Inst);
3803 
3804  return llvm::CallSite(Inst);
3805 }
3806 
3807 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3808  llvm::Value *New) {
3809  DeferredReplacements.push_back(std::make_pair(Old, New));
3810 }
3811 
3812 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3813  const CGCallee &Callee,
3814  ReturnValueSlot ReturnValue,
3815  const CallArgList &CallArgs,
3816  llvm::Instruction **callOrInvoke,
3817  SourceLocation Loc) {
3818  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3819 
3820  assert(Callee.isOrdinary() || Callee.isVirtual());
3821 
3822  // Handle struct-return functions by passing a pointer to the
3823  // location that we would like to return into.
3824  QualType RetTy = CallInfo.getReturnType();
3825  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3826 
3827  llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3828 
3829  // 1. Set up the arguments.
3830 
3831  // If we're using inalloca, insert the allocation after the stack save.
3832  // FIXME: Do this earlier rather than hacking it in here!
3833  Address ArgMemory = Address::invalid();
3834  const llvm::StructLayout *ArgMemoryLayout = nullptr;
3835  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3836  const llvm::DataLayout &DL = CGM.getDataLayout();
3837  ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3838  llvm::Instruction *IP = CallArgs.getStackBase();
3839  llvm::AllocaInst *AI;
3840  if (IP) {
3841  IP = IP->getNextNode();
3842  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3843  "argmem", IP);
3844  } else {
3845  AI = CreateTempAlloca(ArgStruct, "argmem");
3846  }
3847  auto Align = CallInfo.getArgStructAlignment();
3848  AI->setAlignment(Align.getQuantity());
3849  AI->setUsedWithInAlloca(true);
3850  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3851  ArgMemory = Address(AI, Align);
3852  }
3853 
3854  // Helper function to drill into the inalloca allocation.
3855  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3856  auto FieldOffset =
3857  CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3858  return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3859  };
3860 
3861  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3862  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3863 
3864  // If the call returns a temporary with struct return, create a temporary
3865  // alloca to hold the result, unless one is given to us.
3866  Address SRetPtr = Address::invalid();
3867  Address SRetAlloca = Address::invalid();
3868  llvm::Value *UnusedReturnSizePtr = nullptr;
3869  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3870  if (!ReturnValue.isNull()) {
3871  SRetPtr = ReturnValue.getValue();
3872  } else {
3873  SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3874  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3875  uint64_t size =
3876  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3877  UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3878  }
3879  }
3880  if (IRFunctionArgs.hasSRetArg()) {
3881  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3882  } else if (RetAI.isInAlloca()) {
3883  Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3884  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3885  }
3886  }
3887 
3888  Address swiftErrorTemp = Address::invalid();
3889  Address swiftErrorArg = Address::invalid();
3890 
3891  // Translate all of the arguments as necessary to match the IR lowering.
3892  assert(CallInfo.arg_size() == CallArgs.size() &&
3893  "Mismatch between function signature & arguments.");
3894  unsigned ArgNo = 0;
3895  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3896  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3897  I != E; ++I, ++info_it, ++ArgNo) {
3898  const ABIArgInfo &ArgInfo = info_it->info;
3899 
3900  // Insert a padding argument to ensure proper alignment.
3901  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3902  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3903  llvm::UndefValue::get(ArgInfo.getPaddingType());
3904 
3905  unsigned FirstIRArg, NumIRArgs;
3906  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3907 
3908  switch (ArgInfo.getKind()) {
3909  case ABIArgInfo::InAlloca: {
3910  assert(NumIRArgs == 0);
3911  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3912  if (I->isAggregate()) {
3913  // Replace the placeholder with the appropriate argument slot GEP.
3914  Address Addr = I->hasLValue()
3915  ? I->getKnownLValue().getAddress()
3916  : I->getKnownRValue().getAggregateAddress();
3917  llvm::Instruction *Placeholder =
3918  cast<llvm::Instruction>(Addr.getPointer());
3919  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3920  Builder.SetInsertPoint(Placeholder);
3921  Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3922  Builder.restoreIP(IP);
3923  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3924  } else {
3925  // Store the RValue into the argument struct.
3926  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3927  unsigned AS = Addr.getType()->getPointerAddressSpace();
3928  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3929  // There are some cases where a trivial bitcast is not avoidable. The
3930  // definition of a type later in a translation unit may change its type
3931  // from {}* to (%struct.foo*)*.
3932  if (Addr.getType() != MemType)
3933  Addr = Builder.CreateBitCast(Addr, MemType);
3934  I->copyInto(*this, Addr);
3935  }
3936  break;
3937  }
3938 
3939  case ABIArgInfo::Indirect: {
3940  assert(NumIRArgs == 1);
3941  if (!I->isAggregate()) {
3942  // Make a temporary alloca to pass the argument.
3943  Address Addr = CreateMemTempWithoutCast(
3944  I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3945  IRCallArgs[FirstIRArg] = Addr.getPointer();
3946 
3947  I->copyInto(*this, Addr);
3948  } else {
3949  // We want to avoid creating an unnecessary temporary+copy here;
3950  // however, we need one in three cases:
3951  // 1. If the argument is not byval, and we are required to copy the
3952  // source. (This case doesn't occur on any common architecture.)
3953  // 2. If the argument is byval, RV is not sufficiently aligned, and
3954  // we cannot force it to be sufficiently aligned.
3955  // 3. If the argument is byval, but RV is not located in default
3956  // or alloca address space.
3957  Address Addr = I->hasLValue()
3958  ? I->getKnownLValue().getAddress()
3959  : I->getKnownRValue().getAggregateAddress();
3960  llvm::Value *V = Addr.getPointer();
3961  CharUnits Align = ArgInfo.getIndirectAlign();
3962  const llvm::DataLayout *TD = &CGM.getDataLayout();
3963 
3964  assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3965  IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3966  TD->getAllocaAddrSpace()) &&
3967  "indirect argument must be in alloca address space");
3968 
3969  bool NeedCopy = false;
3970 
3971  if (Addr.getAlignment() < Align &&
3972  llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3973  Align.getQuantity()) {
3974  NeedCopy = true;
3975  } else if (I->hasLValue()) {
3976  auto LV = I->getKnownLValue();
3977  auto AS = LV.getAddressSpace();
3978 
3979  if ((!ArgInfo.getIndirectByVal() &&
3980  (LV.getAlignment() >=
3981  getContext().getTypeAlignInChars(I->Ty)))) {
3982  NeedCopy = true;
3983  }
3984  if (!getLangOpts().OpenCL) {
3985  if ((ArgInfo.getIndirectByVal() &&
3986  (AS != LangAS::Default &&
3987  AS != CGM.getASTAllocaAddressSpace()))) {
3988  NeedCopy = true;
3989  }
3990  }
3991  // For OpenCL even if RV is located in default or alloca address space
3992  // we don't want to perform address space cast for it.
3993  else if ((ArgInfo.getIndirectByVal() &&
3994  Addr.getType()->getAddressSpace() != IRFuncTy->
3995  getParamType(FirstIRArg)->getPointerAddressSpace())) {
3996  NeedCopy = true;
3997  }
3998  }
3999 
4000  if (NeedCopy) {
4001  // Create an aligned temporary, and copy to it.
4002  Address AI = CreateMemTempWithoutCast(
4003  I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4004  IRCallArgs[FirstIRArg] = AI.getPointer();
4005  I->copyInto(*this, AI);
4006  } else {
4007  // Skip the extra memcpy call.
4008  auto *T = V->getType()->getPointerElementType()->getPointerTo(
4009  CGM.getDataLayout().getAllocaAddrSpace());
4010  IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4011  *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4012  true);
4013  }
4014  }
4015  break;
4016  }
4017 
4018  case ABIArgInfo::Ignore:
4019  assert(NumIRArgs == 0);
4020  break;
4021 
4022  case ABIArgInfo::Extend:
4023  case ABIArgInfo::Direct: {
4024  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4025  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4026  ArgInfo.getDirectOffset() == 0) {
4027  assert(NumIRArgs == 1);
4028  llvm::Value *V;
4029  if (!I->isAggregate())
4030  V = I->getKnownRValue().getScalarVal();
4031  else
4032  V = Builder.CreateLoad(
4033  I->hasLValue() ? I->getKnownLValue().getAddress()
4034  : I->getKnownRValue().getAggregateAddress());
4035 
4036  // Implement swifterror by copying into a new swifterror argument.
4037  // We'll write back in the normal path out of the call.
4038  if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4040  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4041 
4042  QualType pointeeTy = I->Ty->getPointeeType();
4043  swiftErrorArg =
4044  Address(V, getContext().getTypeAlignInChars(pointeeTy));
4045 
4046  swiftErrorTemp =
4047  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4048  V = swiftErrorTemp.getPointer();
4049  cast<llvm::AllocaInst>(V)->setSwiftError(true);
4050 
4051  llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4052  Builder.CreateStore(errorValue, swiftErrorTemp);
4053  }
4054 
4055  // We might have to widen integers, but we should never truncate.
4056  if (ArgInfo.getCoerceToType() != V->getType() &&
4057  V->getType()->isIntegerTy())
4058  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4059 
4060  // If the argument doesn't match, perform a bitcast to coerce it. This
4061  // can happen due to trivial type mismatches.
4062  if (FirstIRArg < IRFuncTy->getNumParams() &&
4063  V->getType() != IRFuncTy->getParamType(FirstIRArg))
4064  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4065 
4066  IRCallArgs[FirstIRArg] = V;
4067  break;
4068  }
4069 
4070  // FIXME: Avoid the conversion through memory if possible.
4071  Address Src = Address::invalid();
4072  if (!I->isAggregate()) {
4073  Src = CreateMemTemp(I->Ty, "coerce");
4074  I->copyInto(*this, Src);
4075  } else {
4076  Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4077  : I->getKnownRValue().getAggregateAddress();
4078  }
4079 
4080  // If the value is offset in memory, apply the offset now.
4081  Src = emitAddressAtOffset(*this, Src, ArgInfo);
4082 
4083  // Fast-isel and the optimizer generally like scalar values better than
4084  // FCAs, so we flatten them if this is safe to do for this argument.
4085  llvm::StructType *STy =
4086  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4087  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4088  llvm::Type *SrcTy = Src.getType()->getElementType();
4089  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4090  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4091 
4092  // If the source type is smaller than the destination type of the
4093  // coerce-to logic, copy the source value into a temp alloca the size
4094  // of the destination type to allow loading all of it. The bits past
4095  // the source value are left undef.
4096  if (SrcSize < DstSize) {
4097  Address TempAlloca
4098  = CreateTempAlloca(STy, Src.getAlignment(),
4099  Src.getName() + ".coerce");
4100  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4101  Src = TempAlloca;
4102  } else {
4103  Src = Builder.CreateBitCast(Src,
4104  STy->getPointerTo(Src.getAddressSpace()));
4105  }
4106 
4107  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
4108  assert(NumIRArgs == STy->getNumElements());
4109  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4110  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
4111  Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
4112  llvm::Value *LI = Builder.CreateLoad(EltPtr);
4113  IRCallArgs[FirstIRArg + i] = LI;
4114  }
4115  } else {
4116  // In the simple case, just pass the coerced loaded value.
4117  assert(NumIRArgs == 1);
4118  IRCallArgs[FirstIRArg] =
4119  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4120  }
4121 
4122  break;
4123  }
4124 
4125  case ABIArgInfo::CoerceAndExpand: {
4126  auto coercionType = ArgInfo.getCoerceAndExpandType();
4127  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4128 
4129  llvm::Value *tempSize = nullptr;
4130  Address addr = Address::invalid();
4131  Address AllocaAddr = Address::invalid();
4132  if (I->isAggregate()) {
4133  addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4134  : I->getKnownRValue().getAggregateAddress();
4135 
4136  } else {
4137  RValue RV = I->getKnownRValue();
4138  assert(RV.isScalar()); // complex should always just be direct
4139 
4140  llvm::Type *scalarType = RV.getScalarVal()->getType();
4141  auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4142  auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4143 
4144  // Materialize to a temporary.
4145  addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4146  CharUnits::fromQuantity(std::max(
4147  layout->getAlignment(), scalarAlign)),
4148  "tmp",
4149  /*ArraySize=*/nullptr, &AllocaAddr);
4150  tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4151 
4152  Builder.CreateStore(RV.getScalarVal(), addr);
4153  }
4154 
4155  addr = Builder.CreateElementBitCast(addr, coercionType);
4156 
4157  unsigned IRArgPos = FirstIRArg;
4158  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4159  llvm::Type *eltType = coercionType->getElementType(i);
4160  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4161  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4162  llvm::Value *elt = Builder.CreateLoad(eltAddr);
4163  IRCallArgs[IRArgPos++] = elt;
4164  }
4165  assert(IRArgPos == FirstIRArg + NumIRArgs);
4166 
4167  if (tempSize) {
4168  EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4169  }
4170 
4171  break;
4172  }
4173 
4174  case ABIArgInfo::Expand:
4175  unsigned IRArgPos = FirstIRArg;
4176  ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4177  assert(IRArgPos == FirstIRArg + NumIRArgs);
4178  break;
4179  }
4180  }
4181 
4182  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4183  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4184 
4185  // If we're using inalloca, set up that argument.
4186  if (ArgMemory.isValid()) {
4187  llvm::Value *Arg = ArgMemory.getPointer();
4188  if (CallInfo.isVariadic()) {
4189  // When passing non-POD arguments by value to variadic functions, we will
4190  // end up with a variadic prototype and an inalloca call site. In such
4191  // cases, we can't do any parameter mismatch checks. Give up and bitcast
4192  // the callee.
4193  unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4194  auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4195  CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4196  } else {
4197  llvm::Type *LastParamTy =
4198  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4199  if (Arg->getType() != LastParamTy) {
4200 #ifndef NDEBUG
4201  // Assert that these structs have equivalent element types.
4202  llvm::StructType *FullTy = CallInfo.getArgStruct();
4203  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4204  cast<llvm::PointerType>(LastParamTy)->getElementType());
4205  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4206  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4207  DE = DeclaredTy->element_end(),
4208  FI = FullTy->element_begin();
4209  DI != DE; ++DI, ++FI)
4210  assert(*DI == *FI);
4211 #endif
4212  Arg = Builder.CreateBitCast(Arg, LastParamTy);
4213  }
4214  }
4215  assert(IRFunctionArgs.hasInallocaArg());
4216  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4217  }
4218 
4219  // 2. Prepare the function pointer.
4220 
4221  // If the callee is a bitcast of a non-variadic function to have a
4222  // variadic function pointer type, check to see if we can remove the
4223  // bitcast. This comes up with unprototyped functions.
4224  //
4225  // This makes the IR nicer, but more importantly it ensures that we
4226  // can inline the function at -O0 if it is marked always_inline.
4227  auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4228  llvm::FunctionType *CalleeFT =
4229  cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4230  if (!CalleeFT->isVarArg())
4231  return Ptr;
4232 
4233  llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4234  if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4235  return Ptr;
4236 
4237  llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4238  if (!OrigFn)
4239  return Ptr;
4240 
4241  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4242 
4243  // If the original type is variadic, or if any of the component types
4244  // disagree, we cannot remove the cast.
4245  if (OrigFT->isVarArg() ||
4246  OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4247  OrigFT->getReturnType() != CalleeFT->getReturnType())
4248  return Ptr;
4249 
4250  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4251  if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4252  return Ptr;
4253 
4254  return OrigFn;
4255  };
4256  CalleePtr = simplifyVariadicCallee(CalleePtr);
4257 
4258  // 3. Perform the actual call.
4259 
4260  // Deactivate any cleanups that we're supposed to do immediately before
4261  // the call.
4262  if (!CallArgs.getCleanupsToDeactivate().empty())
4263  deactivateArgCleanupsBeforeCall(*this, CallArgs);
4264 
4265  // Assert that the arguments we computed match up. The IR verifier
4266  // will catch this, but this is a common enough source of problems
4267  // during IRGen changes that it's way better for debugging to catch
4268  // it ourselves here.
4269 #ifndef NDEBUG
4270  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4271  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4272  // An inalloca argument can have a different type.
4273  if (IRFunctionArgs.hasInallocaArg() &&
4274  i == IRFunctionArgs.getInallocaArgNo())
4275  continue;
4276  if (i < IRFuncTy->getNumParams())
4277  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4278  }
4279 #endif
4280 
4281  // Update the largest vector width if any arguments have vector types.
4282  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4283  if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4284  LargestVectorWidth = std::max(LargestVectorWidth,
4285  VT->getPrimitiveSizeInBits());
4286  }
4287 
4288  // Compute the calling convention and attributes.
4289  unsigned CallingConv;
4290  llvm::AttributeList Attrs;
4291  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4292  Callee.getAbstractInfo(), Attrs, CallingConv,
4293  /*AttrOnCallSite=*/true);
4294 
4295  // Apply some call-site-specific attributes.
4296  // TODO: work this into building the attribute set.
4297 
4298  // Apply always_inline to all calls within flatten functions.
4299  // FIXME: should this really take priority over __try, below?
4300  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4301  !(Callee.getAbstractInfo().getCalleeDecl().getDecl() &&
4302  Callee.getAbstractInfo()
4303  .getCalleeDecl()
4304  .getDecl()
4305  ->hasAttr<NoInlineAttr>())) {
4306  Attrs =
4307  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4308  llvm::Attribute::AlwaysInline);
4309  }
4310 
4311  // Disable inlining inside SEH __try blocks.
4312  if (isSEHTryScope()) {
4313  Attrs =
4314  Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4315  llvm::Attribute::NoInline);
4316  }
4317 
4318  // Decide whether to use a call or an invoke.
4319  bool CannotThrow;
4320  if (currentFunctionUsesSEHTry()) {
4321  // SEH cares about asynchronous exceptions, so everything can "throw."
4322  CannotThrow = false;
4323  } else if (isCleanupPadScope() &&
4324  EHPersonality::get(*this).isMSVCXXPersonality()) {
4325  // The MSVC++ personality will implicitly terminate the program if an
4326  // exception is thrown during a cleanup outside of a try/catch.
4327  // We don't need to model anything in IR to get this behavior.
4328  CannotThrow = true;
4329  } else {
4330  // Otherwise, nounwind call sites will never throw.
4331  CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4332  llvm::Attribute::NoUnwind);
4333  }
4334 
4335  // If we made a temporary, be sure to clean up after ourselves. Note that we
4336  // can't depend on being inside of an ExprWithCleanups, so we need to manually
4337  // pop this cleanup later on. Being eager about this is OK, since this
4338  // temporary is 'invisible' outside of the callee.
4339  if (UnusedReturnSizePtr)
4340  pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4341  UnusedReturnSizePtr);
4342 
4343  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4344 
4345  SmallVector<llvm::OperandBundleDef, 1> BundleList =
4346  getBundlesForFunclet(CalleePtr);
4347 
4348  // Emit the actual call/invoke instruction.
4349  llvm::CallSite CS;
4350  if (!InvokeDest) {
4351  CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4352  } else {
4353  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4354  CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4355  BundleList);
4356  EmitBlock(Cont);
4357  }
4358  llvm::Instruction *CI = CS.getInstruction();
4359  if (callOrInvoke)
4360  *callOrInvoke = CI;
4361 
4362  // Apply the attributes and calling convention.
4363  CS.setAttributes(Attrs);
4364  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4365 
4366  // Apply various metadata.
4367 
4368  if (!CI->getType()->isVoidTy())
4369  CI->setName("call");
4370 
4371  // Update largest vector width from the return type.
4372  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4373  LargestVectorWidth = std::max(LargestVectorWidth,
4374  VT->getPrimitiveSizeInBits());
4375 
4376  // Insert instrumentation or attach profile metadata at indirect call sites.
4377  // For more details, see the comment before the definition of
4378  // IPVK_IndirectCallTarget in InstrProfData.inc.
4379  if (!CS.getCalledFunction())
4380  PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4381  CI, CalleePtr);
4382 
4383  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4384  // optimizer it can aggressively ignore unwind edges.
4385  if (CGM.getLangOpts().ObjCAutoRefCount)
4386  AddObjCARCExceptionMetadata(CI);
4387 
4388  // Suppress tail calls if requested.
4389  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4390  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4391  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4392  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4393  }
4394 
4395  // 4. Finish the call.
4396 
4397  // If the call doesn't return, finish the basic block and clear the
4398  // insertion point; this allows the rest of IRGen to discard
4399  // unreachable code.
4400  if (CS.doesNotReturn()) {
4401  if (UnusedReturnSizePtr)
4402  PopCleanupBlock();
4403 
4404  // Strip away the noreturn attribute to better diagnose unreachable UB.
4405  if (SanOpts.has(SanitizerKind::Unreachable)) {
4406  if (auto *F = CS.getCalledFunction())
4407  F->removeFnAttr(llvm::Attribute::NoReturn);
4408  CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4409  llvm::Attribute::NoReturn);
4410  }
4411 
4412  EmitUnreachable(Loc);
4413  Builder.ClearInsertionPoint();
4414 
4415  // FIXME: For now, emit a dummy basic block because expr emitters
4416  // generally are not ready to handle emitting expressions at
4417  // unreachable points.
4418  EnsureInsertPoint();
4419 
4420  // Return a reasonable RValue.
4421  return GetUndefRValue(RetTy);
4422  }
4423 
4424  // Perform the swifterror writeback.
4425  if (swiftErrorTemp.isValid()) {
4426  llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4427  Builder.CreateStore(errorResult, swiftErrorArg);
4428  }
4429 
4430  // Emit any call-associated writebacks immediately. Arguably this
4431  // should happen after any return-value munging.
4432  if (CallArgs.hasWritebacks())
4433  emitWritebacks(*this, CallArgs);
4434 
4435  // The stack cleanup for inalloca arguments has to run out of the normal
4436  // lexical order, so deactivate it and run it manually here.
4437  CallArgs.freeArgumentMemory(*this);
4438 
4439  // Extract the return value.
4440  RValue Ret = [&] {
4441  switch (RetAI.getKind()) {
4442  case ABIArgInfo::CoerceAndExpand: {
4443  auto coercionType = RetAI.getCoerceAndExpandType();
4444  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4445 
4446  Address addr = SRetPtr;
4447  addr = Builder.CreateElementBitCast(addr, coercionType);
4448 
4449  assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4450  bool requiresExtract = isa<llvm::StructType>(CI->getType());
4451 
4452  unsigned unpaddedIndex = 0;
4453  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4454  llvm::Type *eltType = coercionType->getElementType(i);
4455  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4456  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4457  llvm::Value *elt = CI;
4458  if (requiresExtract)
4459  elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4460  else
4461  assert(unpaddedIndex == 0);
4462  Builder.CreateStore(elt, eltAddr);
4463  }
4464  // FALLTHROUGH
4465  LLVM_FALLTHROUGH;
4466  }
4467 
4468  case ABIArgInfo::InAlloca:
4469  case ABIArgInfo::Indirect: {
4470  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4471  if (UnusedReturnSizePtr)
4472  PopCleanupBlock();
4473  return ret;
4474  }
4475 
4476  case ABIArgInfo::Ignore:
4477  // If we are ignoring an argument that had a result, make sure to
4478  // construct the appropriate return value for our caller.
4479  return GetUndefRValue(RetTy);
4480 
4481  case ABIArgInfo::Extend:
4482  case ABIArgInfo::Direct: {
4483  llvm::Type *RetIRTy = ConvertType(RetTy);
4484  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4485  switch (getEvaluationKind(RetTy)) {
4486  case TEK_Complex: {
4487  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4488  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4489  return RValue::getComplex(std::make_pair(Real, Imag));
4490  }
4491  case TEK_Aggregate: {
4492  Address DestPtr = ReturnValue.getValue();
4493  bool DestIsVolatile = ReturnValue.isVolatile();
4494 
4495  if (!DestPtr.isValid()) {
4496  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4497  DestIsVolatile = false;
4498  }
4499  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4500  return RValue::getAggregate(DestPtr);
4501  }
4502  case TEK_Scalar: {
4503  // If the argument doesn't match, perform a bitcast to coerce it. This
4504  // can happen due to trivial type mismatches.
4505  llvm::Value *V = CI;
4506  if (V->getType() != RetIRTy)
4507  V = Builder.CreateBitCast(V, RetIRTy);
4508  return RValue::get(V);
4509  }
4510  }
4511  llvm_unreachable("bad evaluation kind");
4512  }
4513 
4514  Address DestPtr = ReturnValue.getValue();
4515  bool DestIsVolatile = ReturnValue.isVolatile();
4516 
4517  if (!DestPtr.isValid()) {
4518  DestPtr = CreateMemTemp(RetTy, "coerce");
4519  DestIsVolatile = false;
4520  }
4521 
4522  // If the value is offset in memory, apply the offset now.
4523  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4524  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4525 
4526  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4527  }
4528 
4529  case ABIArgInfo::Expand:
4530  llvm_unreachable("Invalid ABI kind for return argument");
4531  }
4532 
4533  llvm_unreachable("Unhandled ABIArgInfo::Kind");
4534  } ();
4535 
4536  // Emit the assume_aligned check on the return value.
4537  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4538  if (Ret.isScalar() && TargetDecl) {
4539  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4540  llvm::Value *OffsetValue = nullptr;
4541  if (const auto *Offset = AA->getOffset())
4542  OffsetValue = EmitScalarExpr(Offset);
4543 
4544  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4545  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4546  EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4547  AlignmentCI->getZExtValue(), OffsetValue);
4548  } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4549  llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
4550  .getRValue(*this)
4551  .getScalarVal();
4552  EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4553  AlignmentVal);
4554  }
4555  }
4556 
4557  return Ret;
4558 }
4559 
4560 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4561  if (isVirtual()) {
4562  const CallExpr *CE = getVirtualCallExpr();
4563  return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4564  CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(),
4565  CE ? CE->getBeginLoc() : SourceLocation());
4566  }
4567 
4568  return *this;
4569 }
4570 
4571 /* VarArg handling */
4572 
4573 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4574  VAListAddr = VE->isMicrosoftABI()
4575  ? EmitMSVAListRef(VE->getSubExpr())
4576  : EmitVAListRef(VE->getSubExpr());
4577  QualType Ty = VE->getType();
4578  if (VE->isMicrosoftABI())
4579  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4580  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4581 }
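// Illustrative sketch (not part of the original file): the source construct
// lowered above. Each va_arg below reaches EmitVAArg, which defers to the
// target's ABIInfo (or to the Microsoft lowering under the MS ABI).
#if 0
#include <cstdarg>

int sum(int count, ...) {
  va_list ap;
  va_start(ap, count);
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(ap, int);  // lowered via EmitVAArg
  va_end(ap);
  return total;
}
#endif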
Retrieve the unqualified form of this type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
Definition: CGCall.cpp:2197
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
Definition: Type.h:6135
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:115
static int getExpansionSize(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:963
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:689
const ParmVarDecl * getParamDecl(unsigned I) const
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i...
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, ArrayRef< llvm::Value *> Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3782
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
Definition: CGObjC.cpp:2064
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:377
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2483
virtual AddedStructorArgs buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters...
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
Definition: CGCall.cpp:2177
bool hasWritebacks() const
Definition: CGCall.h:311
Default closure variant of a ctor.
Definition: ABI.h:29
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::Instruction **callOrInvoke, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
Definition: CGCall.cpp:3812
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
Represents a variable declaration or definition.
Definition: Decl.h:812
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:106
llvm::Instruction * getStackBase() const
Definition: CGCall.h:333
unsigned getNumParams() const
Definition: Type.h:3887
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
llvm::Value * getFunctionPointer() const
Definition: CGCall.h:177
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
Definition: CGCall.cpp:1221
const T * getAs() const
Member-template getAs<specific type>&#39;.
Definition: Type.h:6755
void setCoerceToType(llvm::Type *T)
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:3543
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:138
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition: CGCall.cpp:3381
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:233
llvm::Value * getPointer() const
Definition: Address.h:37
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:305
Address getValue() const
Definition: CGCall.h:380
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
Represents a parameter to a function.
Definition: Decl.h:1549
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:56
void add(RValue rvalue, QualType type)
Definition: CGCall.h:284
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM callilng convention.
Definition: CGCall.cpp:45
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
Definition: TargetInfo.cpp:419
Represents a struct/union/class.
Definition: Decl.h:3592
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3373
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:347
An object to manage conditionally-evaluated expressions.
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2457
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1017
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:3209
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2814
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.h:3996
Address getAddress() const
Definition: CGValue.h:326
unsigned getRegParm() const
Definition: Type.h:3517
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:154
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:4062
llvm::Constant * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
field_range fields() const
Definition: Decl.h:3783
bool isVolatileQualified() const
Definition: CGValue.h:257
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2294
Represents a member of a struct/union/class.
Definition: Decl.h:2578
CharUnits getAlignment() const
Definition: CGValue.h:315
RequiredArgs getRequiredArgs() const
bool isUsingInAlloca() const
Returns if we&#39;re using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:338
unsigned getFunctionScopeIndex() const
Returns the index of this parameter in its prototype or method scope.
Definition: Decl.h:1602
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:102
llvm::CallInst * EmitRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isOrdinary() const
Definition: CGCall.h:168
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:265
CharUnits getArgStructAlignment() const
bool isReferenceType() const
Definition: Type.h:6307
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2284
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that&#39;s being passed call-by-writeback.
Definition: CGCall.cpp:3237
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:513
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:52
bool isVirtual() const
Definition: CGCall.h:186
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Decl.h:738
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:320
bool getProducesResult() const
Definition: Type.h:3512
llvm::FunctionType * getFunctionType() const
Definition: CGCall.h:202
bool isGLValue() const
Definition: Expr.h:252
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:284
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment...
static bool hasScalarEvaluationKind(QualType T)
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2607
void copyInto(CodeGenFunction &CGF, Address A) const
Definition: CGCall.cpp:3570
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:156
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:37
llvm::StructType * getCoerceAndExpandType() const
bool hasConstructorVariants() const
Does this ABI have different entrypoints for complete-object and base-subobject constructors?
Definition: TargetCXXABI.h:214
Wrapper for source info for functions.
Definition: TypeLoc.h:1326
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:66
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:108
unsigned getInAllocaFieldIndex() const
const_arg_iterator arg_begin() const
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:70
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:395
LangAS getAddressSpace() const
Definition: Type.h:348
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:1832
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:273
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:136
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
Definition: CGCall.cpp:473
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
Definition: CGCall.cpp:707
Values of this type can never be null.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:84
bool isSimple() const
Definition: CGValue.h:251
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:278
bool isInstance() const
Definition: DeclCXX.h:2068
An ordinary object is located at an address in memory.
Definition: Specifiers.h:125
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1696
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:105
FunctionType::ExtInfo getExtInfo() const
QualType getReturnType() const
Definition: DeclObjC.h:322
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:742
bool getNoReturn() const
Definition: Type.h:3511
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:83
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:70
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the &#39;this&#39; type for codegen purposes, i.e.
Definition: CGCall.cpp:72
bool getNoCallerSavedRegs() const
Definition: Type.h:3513
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
Definition: CGCall.cpp:3587
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:515
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Definition: TargetInfo.h:304
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:3570
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
Definition: CGCall.cpp:504
Represents a K&R-style &#39;int foo()&#39; function, which has no information available about its arguments...
Definition: Type.h:3649
bool hasAttr() const
Definition: DeclBase.h:530
CanQualType getReturnType() const
Const iterator for iterating over Stmt * arrays that contain only Expr *.
Definition: Stmt.h:996
bool isValid() const
Definition: Address.h:35
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1612
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3686
llvm::CallInst * EmitNounwindRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:4120
const TargetCodeGenInfo & getTargetCodeGenInfo()
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
writeback_const_range writebacks() const
Definition: CGCall.h:316
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:305
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
Definition: CGCall.cpp:3085
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:178
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4097
Address Temporary
The temporary alloca.
Definition: CGCall.h:270
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns &#39;th...
Definition: CGCXXABI.h:106
unsigned Offset
Definition: Format.cpp:1630
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:273
ExtParameterInfo withIsNoEscape(bool NoEscape) const
Definition: Type.h:3420
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:3064
This represents one expression.
Definition: Expr.h:106
bool isVariadic() const
Whether this function is variadic.
Definition: Decl.cpp:2688
static Address invalid()
Definition: Address.h:34
llvm::Type * getUnpaddedCoerceAndExpandType() const
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:3059
bool useObjCFPRetForRealType(RealType T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
Definition: TargetInfo.h:704
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:90
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:65
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type &#39;void ()&#39;.
Definition: CGCall.cpp:700
bool getHasRegParm() const
Definition: Type.h:3515
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:6818
bool isObjCRetainableType() const
Definition: Type.cpp:3920
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2705
llvm::Constant * objc_retain
id objc_retain(id);
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:43
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:62
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:361
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2595
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition: CGCall.cpp:3712
A class for recording the number of arguments that a function signature requires. ...
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when &#39;sret&#39; is used as a return type...
Definition: CGCall.cpp:1510
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:645
QualType getType() const
Definition: Expr.h:128
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1351
Qualifiers getTypeQualifiers() const
Definition: DeclCXX.h:2187
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes &#39;this&#39; as the first parameter followed by varargs.
Definition: CGCall.cpp:534
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2752
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:196
void Profile(llvm::FoldingSetNodeID &ID)
UnaryOperator - This represents the unary-expression&#39;s (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1902
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:2025
ASTContext & getContext() const
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:413
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1167
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:235
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:34
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1273
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:412
CanQualType getCanonicalTypeUnqualified() const
LValue getKnownLValue() const
Definition: CGCall.h:239
The l-value was considered opaque, so the alignment was determined from a type.
RecordDecl * getDecl() const
Definition: Type.h:4379
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store to.
Definition: CGCall.cpp:1298
Enumerates target-specific builtins in their own namespaces within namespace clang.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:141
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:165
Kind
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses &#39;fpret&#39; when used as a return type.
Definition: CGCall.cpp:1515
CanProxy< U > castAs() const
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:3226
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant...
Definition: Expr.cpp:3437
Encodes a location in the source.
QualType getReturnType() const
Definition: Type.h:3612
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2171
A saved depth on the scope stack.
Definition: EHScopeStack.h:106
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:295
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition: CGCall.cpp:3771
ParameterABI getABI() const
Return the ABI treatment of this parameter.
Definition: Type.h:3393
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1245
CallingConv getCC() const
Definition: Type.h:3524
const Decl * getDecl() const
Definition: GlobalDecl.h:68
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C &#39;SEL&#39; type.
Definition: ASTContext.h:1858
An aggregate value slot.
Definition: CGValue.h:436
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
Definition: CGCall.cpp:460
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2040
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2415
const_arg_iterator arg_end() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
ObjCEntrypoints & getObjCEntrypoints() const
CoerceAndExpand - Only valid for aggregate argument types.
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:3365
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:725
CanQualType VoidTy
Definition: ASTContext.h:1015
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
bool isAnyPointerType() const
Definition: Type.h:6299
An aligned address.
Definition: Address.h:24
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after...
Definition: Type.h:1148
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Definition: TargetInfo.h:710
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:176
All available information about a concrete callee.
Definition: CGCall.h:66
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:369
Complete object dtor.
Definition: ABI.h:35
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses &#39;fp2ret&#39; when used as a return type.
Definition: CGCall.cpp:1532
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1693
bool hasFlexibleArrayMember() const
Definition: Decl.h:3646
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3921
CXXCtorType
C++ constructor types.
Definition: ABI.h:24
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
Definition: CGCall.cpp:625
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
llvm::Type * getPaddingType() const
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:553
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1131
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:355
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:58
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
const TargetInfo & getTarget() const
Definition: CodeGenTypes.h:174
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
Dataflow Directional Tag Classes.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2388
ExtInfo getExtInfo() const
Definition: Type.h:3623
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:92
CodeGenFunction::ComplexPairTy ComplexPairTy
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
Definition: CGBuilder.h:171
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:90
LValue Source
The original argument.
Definition: CGCall.h:267
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type...
Definition: CGCall.cpp:436
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value *> args)
Emits a call or invoke to the given noreturn runtime function.
Definition: CGCall.cpp:3740
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:90
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:1005
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP, const FunctionDecl *FD)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:169
Interesting information about a specific parameter that can&#39;t simply be reflected in parameter&#39;s type...
Definition: Type.h:3380
void EmitARCIntrinsicUse(ArrayRef< llvm::Value *> values)
Given a number of pointers, inform the optimizer that they&#39;re being intrinsically used up until this ...
Definition: CGObjC.cpp:1892
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:69
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2165
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
RValue getRValue(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3560
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:801
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:107
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Definition: TargetInfo.cpp:400
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
uint64_t SanitizerMask
Definition: Sanitizers.h:25
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:4369
Complex values, per C99 6.2.5p11.
Definition: Type.h:2476
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:75
static bool classof(const OMPClause *T)
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:2020
QualType getCanonicalTypeInternal() const
Definition: Type.h:2354
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:6581
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable &#39;self&#39;, remove it.
Definition: CGCall.cpp:2695
CharUnits getIndirectAlign() const
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
T * getAttr() const
Definition: DeclBase.h:526
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:51
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:644
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:118
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:61
Expand - Only valid for aggregate argument types.
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:2672
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type *>::iterator &TI)
getExpandedTypes - Expand the type
Definition: CGCall.cpp:983