clang  8.0.0svn
CodeGenFunction.h
1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This is the internal per-function state used for llvm translation.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15 #define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
16 
17 #include "CGBuilder.h"
18 #include "CGDebugInfo.h"
19 #include "CGLoopInfo.h"
20 #include "CGValue.h"
21 #include "CodeGenModule.h"
22 #include "CodeGenPGO.h"
23 #include "EHScopeStack.h"
24 #include "VarBypassDetector.h"
25 #include "clang/AST/CharUnits.h"
26 #include "clang/AST/ExprCXX.h"
27 #include "clang/AST/ExprObjC.h"
28 #include "clang/AST/ExprOpenMP.h"
29 #include "clang/AST/Type.h"
30 #include "clang/Basic/ABI.h"
33 #include "clang/Basic/TargetInfo.h"
35 #include "llvm/ADT/ArrayRef.h"
36 #include "llvm/ADT/DenseMap.h"
37 #include "llvm/ADT/MapVector.h"
38 #include "llvm/ADT/SmallVector.h"
39 #include "llvm/IR/ValueHandle.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Transforms/Utils/SanitizerStats.h"
42 
43 namespace llvm {
44 class BasicBlock;
45 class LLVMContext;
46 class MDNode;
47 class Module;
48 class SwitchInst;
49 class Twine;
50 class Value;
51 class CallSite;
52 }
53 
54 namespace clang {
55 class ASTContext;
56 class BlockDecl;
57 class CXXDestructorDecl;
58 class CXXForRangeStmt;
59 class CXXTryStmt;
60 class Decl;
61 class LabelDecl;
62 class EnumConstantDecl;
63 class FunctionDecl;
64 class FunctionProtoType;
65 class LabelStmt;
66 class ObjCContainerDecl;
67 class ObjCInterfaceDecl;
68 class ObjCIvarDecl;
69 class ObjCMethodDecl;
70 class ObjCImplementationDecl;
71 class ObjCPropertyImplDecl;
72 class TargetInfo;
73 class VarDecl;
74 class ObjCForCollectionStmt;
75 class ObjCAtTryStmt;
76 class ObjCAtThrowStmt;
77 class ObjCAtSynchronizedStmt;
78 class ObjCAutoreleasePoolStmt;
79 
80 namespace analyze_os_log {
81 class OSLogBufferLayout;
82 }
83 
84 namespace CodeGen {
85 class CodeGenTypes;
86 class CGCallee;
87 class CGFunctionInfo;
88 class CGRecordLayout;
89 class CGBlockInfo;
90 class CGCXXABI;
91 class BlockByrefHelpers;
92 class BlockByrefInfo;
93 class BlockFlags;
94 class BlockFieldFlags;
95 class RegionCodeGenTy;
96 class TargetCodeGenInfo;
97 struct OMPTaskDataTy;
98 struct CGCoroData;
99 
100 /// The kind of evaluation to perform on values of a particular
101 /// type. Basically, is the code in CGExprScalar, CGExprComplex, or
102 /// CGExprAgg?
103 ///
104 /// TODO: should vectors maybe be split out into their own thing?
105 enum TypeEvaluationKind {
106   TEK_Scalar,
107   TEK_Complex,
108   TEK_Aggregate
109 };
110 
111 #define LIST_SANITIZER_CHECKS \
112  SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
113  SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
114  SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
115  SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
116  SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
117  SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
118  SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
119  SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
120  SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
121  SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
122  SANITIZER_CHECK(MissingReturn, missing_return, 0) \
123  SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
124  SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
125  SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
126  SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
127  SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
128  SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
129  SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
130  SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
131  SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
132  SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
133  SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
134  SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
135 
136 enum SanitizerHandler {
137 #define SANITIZER_CHECK(Enum, Name, Version) Enum,
138   LIST_SANITIZER_CHECKS
139 #undef SANITIZER_CHECK
140 };
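// How the X-macro above is consumed (informal sketch): each
// SANITIZER_CHECK(Enum, Name, Version) entry contributes one enumerator to
// SanitizerHandler here, and elsewhere in CodeGen the same list is used to
// derive the matching UBSan runtime entry point (roughly "__ubsan_handle_" +
// Name, with a nonzero Version selecting a "_v<N>"-suffixed handler such as
// __ubsan_handle_type_mismatch_v1). So the expansion above is equivalent to:
//
//   enum SanitizerHandler {
//     AddOverflow,          // from SANITIZER_CHECK(AddOverflow, add_overflow, 0)
//     BuiltinUnreachable,   // from SANITIZER_CHECK(BuiltinUnreachable, ...)
//     // ...one enumerator per LIST_SANITIZER_CHECKS entry...
//   };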
141 
142 /// Helper class with most of the code for saving a value for a
143 /// conditional expression cleanup.
144 struct DominatingLLVMValue {
145  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
146 
147  /// Answer whether the given value needs extra work to be saved.
148  static bool needsSaving(llvm::Value *value) {
149  // If it's not an instruction, we don't need to save.
150  if (!isa<llvm::Instruction>(value)) return false;
151 
152  // If it's an instruction in the entry block, we don't need to save.
153  llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
154  return (block != &block->getParent()->getEntryBlock());
155  }
156 
157  static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
158  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
159 };
160 
161 /// A partial specialization of DominatingValue for llvm::Values that
162 /// might be llvm::Instructions.
163 template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
164  typedef T *type;
165  static type restore(CodeGenFunction &CGF, saved_type value) {
166  return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
167  }
168 };
169 
170 /// A specialization of DominatingValue for Address.
171 template <> struct DominatingValue<Address> {
172  typedef Address type;
173 
174  struct saved_type {
175  DominatingLLVMValue::saved_type SavedValue;
176  CharUnits Alignment;
177  };
178 
179  static bool needsSaving(type value) {
180  return DominatingLLVMValue::needsSaving(value.getPointer());
181  }
182  static saved_type save(CodeGenFunction &CGF, type value) {
183  return { DominatingLLVMValue::save(CGF, value.getPointer()),
184  value.getAlignment() };
185  }
186  static type restore(CodeGenFunction &CGF, saved_type value) {
187  return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
188  value.Alignment);
189  }
190 };
191 
192 /// A specialization of DominatingValue for RValue.
193 template <> struct DominatingValue<RValue> {
194  typedef RValue type;
195  class saved_type {
196  enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
197  AggregateAddress, ComplexAddress };
198 
199  llvm::Value *Value;
200  unsigned K : 3;
201  unsigned Align : 29;
202  saved_type(llvm::Value *v, Kind k, unsigned a = 0)
203  : Value(v), K(k), Align(a) {}
204 
205  public:
206  static bool needsSaving(RValue value);
207  static saved_type save(CodeGenFunction &CGF, RValue value);
208  RValue restore(CodeGenFunction &CGF);
209 
210  // implementations in CGCleanup.cpp
211  };
212 
213  static bool needsSaving(type value) {
214  return saved_type::needsSaving(value);
215  }
216  static saved_type save(CodeGenFunction &CGF, type value) {
217  return saved_type::save(CGF, value);
218  }
219  static type restore(CodeGenFunction &CGF, saved_type value) {
220  return value.restore(CGF);
221  }
222 };
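// Usage sketch: cleanup emission relies on these specializations when a
// cleanup is pushed from within a conditionally-evaluated expression (see
// pushFullExprCleanup below). Each argument of type T is lowered to a
// DominatingValue<T>::saved_type via saveValueInCond() and restored where the
// cleanup is finally emitted. Assuming a CodeGenFunction &CGF and an Address
// Addr already in scope:
//
//   DominatingValue<Address>::saved_type Saved =
//       DominatingValue<Address>::save(CGF, Addr);
//   // ...later, in a block dominated by the save...
//   Address Restored = DominatingValue<Address>::restore(CGF, Saved);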
223 
224 /// CodeGenFunction - This class organizes the per-function state that is used
225 /// while generating LLVM code.
226 class CodeGenFunction : public CodeGenTypeCache {
227  CodeGenFunction(const CodeGenFunction &) = delete;
228  void operator=(const CodeGenFunction &) = delete;
229 
230  friend class CGCXXABI;
231 public:
232  /// A jump destination is an abstract label, branching to which may
233  /// require a jump out through normal cleanups.
234  struct JumpDest {
235  JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
236  JumpDest(llvm::BasicBlock *Block,
237  EHScopeStack::stable_iterator Depth,
238  unsigned Index)
239  : Block(Block), ScopeDepth(Depth), Index(Index) {}
240 
241  bool isValid() const { return Block != nullptr; }
242  llvm::BasicBlock *getBlock() const { return Block; }
243  EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
244  unsigned getDestIndex() const { return Index; }
245 
246  // This should be used cautiously.
247  void setScopeDepth(EHScopeStack::stable_iterator depth) {
248  ScopeDepth = depth;
249  }
250 
251  private:
252  llvm::BasicBlock *Block;
253  EHScopeStack::stable_iterator ScopeDepth;
254  unsigned Index;
255  };
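// Usage sketch: statement emission hands out JumpDests for labels and for
// break/continue targets, and jumps to them with EmitBranchThroughCleanup()
// so that any cleanups between the branch and the destination are run. A
// loop emitter does roughly:
//
//   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
//   // ...emit the loop body; a 'break' becomes...
//   EmitBranchThroughCleanup(LoopExit);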
256 
257  CodeGenModule &CGM; // Per-module state.
258  const TargetInfo &Target;
259 
260  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
261  LoopInfoStack LoopStack;
262  CGBuilderTy Builder;
263 
264  // Stores variables for which we can't generate correct lifetime markers
265  // because of jumps.
266  VarBypassDetector Bypasses;
267 
268  // CodeGen lambda for loops and support for ordered clause
269  typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
270  JumpDest)>
271  CodeGenLoopTy;
272  typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
273  const unsigned, const bool)>
274  CodeGenOrderedTy;
275 
276  // Codegen lambda for loop bounds in worksharing loop constructs
277  typedef llvm::function_ref<std::pair<LValue, LValue>(
278  CodeGenFunction &, const OMPExecutableDirective &S)>
279  CodeGenLoopBoundsTy;
280 
281  // Codegen lambda for loop bounds in dispatch-based loop implementation
282  typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
283  CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
284  Address UB)>
285  CodeGenDispatchBoundsTy;
286 
287  /// CGBuilder insert helper. This function is called after an
288  /// instruction is created using Builder.
289  void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
290  llvm::BasicBlock *BB,
291  llvm::BasicBlock::iterator InsertPt) const;
292 
293  /// CurFuncDecl - Holds the Decl for the current outermost
294  /// non-closure context.
295  const Decl *CurFuncDecl;
296  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
297  const Decl *CurCodeDecl;
298  const CGFunctionInfo *CurFnInfo;
299  QualType FnRetTy;
300  llvm::Function *CurFn = nullptr;
301 
302  // Holds coroutine data if the current function is a coroutine. We use a
303  // wrapper to manage its lifetime, so that we don't have to define CGCoroData
304  // in this header.
305  struct CGCoroInfo {
306  std::unique_ptr<CGCoroData> Data;
307  CGCoroInfo();
308  ~CGCoroInfo();
309  };
310  CGCoroInfo CurCoro;
311 
312  bool isCoroutine() const {
313  return CurCoro.Data != nullptr;
314  }
315 
316  /// CurGD - The GlobalDecl for the current function being compiled.
317  GlobalDecl CurGD;
318 
319  /// PrologueCleanupDepth - The cleanup depth enclosing all the
320  /// cleanups associated with the parameters.
321  EHScopeStack::stable_iterator PrologueCleanupDepth;
322 
323  /// ReturnBlock - Unified return block.
324  JumpDest ReturnBlock;
325 
326  /// ReturnValue - The temporary alloca to hold the return
327  /// value. This is invalid iff the function has no return value.
328  Address ReturnValue = Address::invalid();
329 
330  /// Return true if a label was seen in the current scope.
331  bool hasLabelBeenSeenInCurrentScope() const {
332  if (CurLexicalScope)
333  return CurLexicalScope->hasLabels();
334  return !LabelMap.empty();
335  }
336 
337  /// AllocaInsertPoint - This is an instruction in the entry block before which
338  /// we prefer to insert allocas.
339  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
340 
341  /// API for captured statement code generation.
342  class CGCapturedStmtInfo {
343  public:
344  explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
345  : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
346  explicit CGCapturedStmtInfo(const CapturedStmt &S,
347  CapturedRegionKind K = CR_Default)
348  : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
349 
350  RecordDecl::field_iterator Field =
351  S.getCapturedRecordDecl()->field_begin();
352  for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
353  E = S.capture_end();
354  I != E; ++I, ++Field) {
355  if (I->capturesThis())
356  CXXThisFieldDecl = *Field;
357  else if (I->capturesVariable())
358  CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
359  else if (I->capturesVariableByCopy())
360  CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
361  }
362  }
363 
364  virtual ~CGCapturedStmtInfo();
365 
366  CapturedRegionKind getKind() const { return Kind; }
367 
368  virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
369  // Retrieve the value of the context parameter.
370  virtual llvm::Value *getContextValue() const { return ThisValue; }
371 
372  /// Lookup the captured field decl for a variable.
373  virtual const FieldDecl *lookup(const VarDecl *VD) const {
374  return CaptureFields.lookup(VD->getCanonicalDecl());
375  }
376 
377  bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
378  virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
379 
380  static bool classof(const CGCapturedStmtInfo *) {
381  return true;
382  }
383 
384  /// Emit the captured statement body.
385  virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
387  CGF.EmitStmt(S);
388  }
389 
390  /// Get the name of the capture helper.
391  virtual StringRef getHelperName() const { return "__captured_stmt"; }
392 
393  private:
394  /// The kind of captured statement being generated.
395  CapturedRegionKind Kind;
396 
397  /// Keep the map between VarDecl and FieldDecl.
398  llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
399 
400  /// The base address of the captured record, passed in as the first
401  /// argument of the parallel region function.
402  llvm::Value *ThisValue;
403 
404  /// The field declaration corresponding to the captured 'this'.
405  FieldDecl *CXXThisFieldDecl;
406  };
407  CGCapturedStmtInfo *CapturedStmtInfo = nullptr;
408 
409  /// RAII for correct setting/restoring of CapturedStmtInfo.
410  class CGCapturedStmtRAII {
411  private:
412  CodeGenFunction &CGF;
413  CGCapturedStmtInfo *PrevCapturedStmtInfo;
414  public:
415  CGCapturedStmtRAII(CodeGenFunction &CGF,
416  CGCapturedStmtInfo *NewCapturedStmtInfo)
417  : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
418  CGF.CapturedStmtInfo = NewCapturedStmtInfo;
419  }
420  ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
421  };
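// Usage sketch (CS and CGF are assumed names): an emitter for an outlined
// captured region installs its own CGCapturedStmtInfo for the duration of the
// emission and lets the RAII object restore the previous one:
//
//   CGCapturedStmtInfo CapturedInfo(CS, CR_Default);   // CS: a CapturedStmt
//   CGCapturedStmtRAII CapInfoRAII(CGF, &CapturedInfo);
//   CGF.EmitStmt(CS.getCapturedStmt());
//   // the previous CapturedStmtInfo is restored when CapInfoRAII is destroyed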
422 
423  /// An abstract representation of regular/ObjC call/message targets.
424  class AbstractCallee {
425  /// The function declaration of the callee.
426  const Decl *CalleeDecl;
427 
428  public:
429  AbstractCallee() : CalleeDecl(nullptr) {}
430  AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
431  AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
432  bool hasFunctionDecl() const {
433  return dyn_cast_or_null<FunctionDecl>(CalleeDecl);
434  }
435  const Decl *getDecl() const { return CalleeDecl; }
436  unsigned getNumParams() const {
437  if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
438  return FD->getNumParams();
439  return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
440  }
441  const ParmVarDecl *getParamDecl(unsigned I) const {
442  if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
443  return FD->getParamDecl(I);
444  return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
445  }
446  };
447 
448  /// Sanitizers enabled for this function.
449  SanitizerSet SanOpts;
450 
451  /// True if CodeGen currently emits code implementing sanitizer checks.
452  bool IsSanitizerScope = false;
453 
454  /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
455  class SanitizerScope {
456  CodeGenFunction *CGF;
457  public:
458  SanitizerScope(CodeGenFunction *CGF);
459  ~SanitizerScope();
460  };
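// Usage sketch (from inside a CodeGenFunction member; Ptr, StaticArgs and
// DynamicArgs are assumed to be built already): emitters bracket the IR they
// generate for a sanitizer check in a SanitizerScope so IsSanitizerScope is
// true while those instructions are created:
//
//   {
//     SanitizerScope SanScope(this);
//     llvm::Value *IsNonNull = Builder.CreateIsNotNull(Ptr);
//     EmitCheck(std::make_pair(IsNonNull, SanitizerKind::Null),
//               SanitizerHandler::TypeMismatch, StaticArgs, DynamicArgs);
//   }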
461 
462  /// In C++, whether we are code generating a thunk. This controls whether we
463  /// should emit cleanups.
464  bool CurFuncIsThunk = false;
465 
466  /// In ARC, whether we should autorelease the return value.
467  bool AutoreleaseResult = false;
468 
469  /// Whether we processed a Microsoft-style asm block during CodeGen. These can
470  /// potentially set the return value.
471  bool SawAsmBlock = false;
472 
473  const NamedDecl *CurSEHParent = nullptr;
474 
475  /// True if the current function is an outlined SEH helper. This can be a
476  /// finally block or filter expression.
477  bool IsOutlinedSEHHelper = false;
478 
479  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
480  llvm::Value *BlockPointer = nullptr;
481 
482  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
483  FieldDecl *LambdaThisCaptureField = nullptr;
484 
485  /// A mapping from NRVO variables to the flags used to indicate
486  /// when the NRVO has been applied to this variable.
487  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
488 
488 
489  EHScopeStack EHStack;
490  llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
491  llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
492 
493  llvm::Instruction *CurrentFuncletPad = nullptr;
494 
495  class CallLifetimeEnd final : public EHScopeStack::Cleanup {
496  llvm::Value *Addr;
497  llvm::Value *Size;
498 
499  public:
500  CallLifetimeEnd(Address addr, llvm::Value *size)
501  : Addr(addr.getPointer()), Size(size) {}
502 
503  void Emit(CodeGenFunction &CGF, Flags flags) override {
504  CGF.EmitLifetimeEnd(Size, Addr);
505  }
506  };
507 
508  /// Header for data within LifetimeExtendedCleanupStack.
509  struct LifetimeExtendedCleanupHeader {
510  /// The size of the following cleanup object.
511  unsigned Size;
512  /// The kind of cleanup to push: a value from the CleanupKind enumeration.
513  unsigned Kind : 31;
514  /// Whether this is a conditional cleanup.
515  unsigned IsConditional : 1;
516 
517  size_t getSize() const { return Size; }
518  CleanupKind getKind() const { return (CleanupKind)Kind; }
519  bool isConditional() const { return IsConditional; }
520  };
521 
522  /// i32s containing the indexes of the cleanup destinations.
523  Address NormalCleanupDest = Address::invalid();
524 
525  unsigned NextCleanupDestIndex = 1;
526 
527  /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
528  CGBlockInfo *FirstBlockInfo = nullptr;
529 
530  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
531  llvm::BasicBlock *EHResumeBlock = nullptr;
532 
533  /// The exception slot. All landing pads write the current exception pointer
534  /// into this alloca.
535  llvm::Value *ExceptionSlot = nullptr;
536 
537  /// The selector slot. Under the MandatoryCleanup model, all landing pads
538  /// write the current selector value into this alloca.
539  llvm::AllocaInst *EHSelectorSlot = nullptr;
540 
541  /// A stack of exception code slots. Entering an __except block pushes a slot
542  /// on the stack and leaving pops one. The __exception_code() intrinsic loads
543  /// a value from the top of the stack.
544  SmallVector<llvm::Value *, 1> SEHCodeSlotStack;
545 
546  /// Value returned by __exception_info intrinsic.
547  llvm::Value *SEHInfo = nullptr;
548 
549  /// Emits a landing pad for the current EH stack.
550  llvm::BasicBlock *EmitLandingPad();
551 
552  llvm::BasicBlock *getInvokeDestImpl();
553 
554  template <class T>
555  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
556  return DominatingValue<T>::save(*this, value);
557  }
558 
559 public:
560  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
561  /// rethrows.
562  SmallVector<llvm::Value*, 8> ObjCEHValueStack;
563 
564  /// A class controlling the emission of a finally block.
565  class FinallyInfo {
566  /// Where the catchall's edge through the cleanup should go.
567  JumpDest RethrowDest;
568 
569  /// A function to call to enter the catch.
570  llvm::Constant *BeginCatchFn;
571 
572  /// An i1 variable indicating whether or not the @finally is
573  /// running for an exception.
574  llvm::AllocaInst *ForEHVar;
575 
576  /// An i8* variable into which the exception pointer to rethrow
577  /// has been saved.
578  llvm::AllocaInst *SavedExnVar;
579 
580  public:
581  void enter(CodeGenFunction &CGF, const Stmt *Finally,
582  llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
583  llvm::Constant *rethrowFn);
584  void exit(CodeGenFunction &CGF);
585  };
586 
587  /// Returns true inside SEH __try blocks.
588  bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
589 
590  /// Returns true while emitting a cleanuppad.
591  bool isCleanupPadScope() const {
592  return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
593  }
594 
595  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
596  /// current full-expression. Safe against the possibility that
597  /// we're currently inside a conditionally-evaluated expression.
598  template <class T, class... As>
599  void pushFullExprCleanup(CleanupKind kind, As... A) {
600  // If we're not in a conditional branch, or if none of the
601  // arguments requires saving, then use the unconditional cleanup.
602  if (!isInConditionalBranch())
603  return EHStack.pushCleanup<T>(kind, A...);
604 
605  // Stash values in a tuple so we can guarantee the order of saves.
606  typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
607  SavedTuple Saved{saveValueInCond(A)...};
608 
609  typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
610  EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
611  initFullExprCleanup();
612  }
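// Usage sketch (FreeBuffer and BufferAddr are assumed names): callers just
// push the cleanup; if this happens inside one arm of a conditional, the
// Address argument is transparently saved via DominatingValue<Address> and
// restored when the cleanup is eventually emitted:
//
//   // FreeBuffer: an EHScopeStack::Cleanup whose constructor takes an Address
//   pushFullExprCleanup<FreeBuffer>(NormalAndEHCleanup, BufferAddr);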
613 
614  /// Queue a cleanup to be pushed after finishing the current
615  /// full-expression.
616  template <class T, class... As>
617  void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
618  if (!isInConditionalBranch())
619  return pushCleanupAfterFullExprImpl<T>(Kind, Address::invalid(), A...);
620 
621  Address ActiveFlag = createCleanupActiveFlag();
622  assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
623  "cleanup active flag should never need saving");
624 
625  typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
626  SavedTuple Saved{saveValueInCond(A)...};
627 
628  typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
629  pushCleanupAfterFullExprImpl<CleanupType>(Kind, ActiveFlag, Saved);
630  }
631 
632  template <class T, class... As>
633  void pushCleanupAfterFullExprImpl(CleanupKind Kind, Address ActiveFlag,
634  As... A) {
635  LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
636  ActiveFlag.isValid()};
637 
638  size_t OldSize = LifetimeExtendedCleanupStack.size();
639  LifetimeExtendedCleanupStack.resize(
640  LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
641  (Header.IsConditional ? sizeof(ActiveFlag) : 0));
642 
643  static_assert(sizeof(Header) % alignof(T) == 0,
644  "Cleanup will be allocated on misaligned address");
645  char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
646  new (Buffer) LifetimeExtendedCleanupHeader(Header);
647  new (Buffer + sizeof(Header)) T(A...);
648  if (Header.IsConditional)
649  new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag);
650  }
651 
652  /// Set up the last cleanup that was pushed as a conditional
653  /// full-expression cleanup.
654  void initFullExprCleanup() {
655  initFullExprCleanupWithFlag(createCleanupActiveFlag());
656  }
657 
658  void initFullExprCleanupWithFlag(Address ActiveFlag);
659  Address createCleanupActiveFlag();
660 
661  /// PushDestructorCleanup - Push a cleanup to call the
662  /// complete-object destructor of an object of the given type at the
663  /// given address. Does nothing if T is not a C++ class type with a
664  /// non-trivial destructor.
665  void PushDestructorCleanup(QualType T, Address Addr);
666 
667  /// PushDestructorCleanup - Push a cleanup to call the
668  /// complete-object variant of the given destructor on the object at
669  /// the given address.
670  void PushDestructorCleanup(const CXXDestructorDecl *Dtor, Address Addr);
671 
672  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
673  /// process all branch fixups.
674  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
675 
676  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
677  /// The block cannot be reactivated. Pops it if it's the top of the
678  /// stack.
679  ///
680  /// \param DominatingIP - An instruction which is known to
681  /// dominate the current IP (if set) and which lies along
682  /// all paths of execution between the current IP and
683  /// the point at which the cleanup comes into scope.
684  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
685  llvm::Instruction *DominatingIP);
686 
687  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
688  /// Cannot be used to resurrect a deactivated cleanup.
689  ///
690  /// \param DominatingIP - An instruction which is known to
691  /// dominate the current IP (if set) and which lies along
692  /// all paths of execution between the current IP and
693  /// the point at which the cleanup comes into scope.
694  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
695  llvm::Instruction *DominatingIP);
696 
697  /// Enters a new scope for capturing cleanups, all of which
698  /// will be executed once the scope is exited.
699  class RunCleanupsScope {
700  EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
701  size_t LifetimeExtendedCleanupStackSize;
702  bool OldDidCallStackSave;
703  protected:
704  bool PerformCleanup;
705  private:
706 
707  RunCleanupsScope(const RunCleanupsScope &) = delete;
708  void operator=(const RunCleanupsScope &) = delete;
709 
710  protected:
711  CodeGenFunction& CGF;
712 
713  public:
714  /// Enter a new cleanup scope.
715  explicit RunCleanupsScope(CodeGenFunction &CGF)
716  : PerformCleanup(true), CGF(CGF)
717  {
718  CleanupStackDepth = CGF.EHStack.stable_begin();
719  LifetimeExtendedCleanupStackSize =
720  CGF.LifetimeExtendedCleanupStack.size();
721  OldDidCallStackSave = CGF.DidCallStackSave;
722  CGF.DidCallStackSave = false;
723  OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
724  CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
725  }
726 
727  /// Exit this cleanup scope, emitting any accumulated cleanups.
728  ~RunCleanupsScope() {
729  if (PerformCleanup)
730  ForceCleanup();
731  }
732 
733  /// Determine whether this scope requires any cleanups.
734  bool requiresCleanups() const {
735  return CGF.EHStack.stable_begin() != CleanupStackDepth;
736  }
737 
738  /// Force the emission of cleanups now, instead of waiting
739  /// until this object is destroyed.
740  /// \param ValuesToReload - A list of values that need to be available at
741  /// the insertion point after cleanup emission. If cleanup emission created
742  /// a shared cleanup block, these value pointers will be rewritten.
743  /// Otherwise, they will not be modified.
744  void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
745  assert(PerformCleanup && "Already forced cleanup");
746  CGF.DidCallStackSave = OldDidCallStackSave;
747  CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
748  ValuesToReload);
749  PerformCleanup = false;
750  CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
751  }
752  };
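// Usage sketch: a statement emitter brackets a scope's body in a
// RunCleanupsScope so that cleanups pushed while emitting the body (temporary
// destructors, lifetime.end markers, ...) are emitted when the scope exits:
//
//   {
//     RunCleanupsScope BodyScope(*this);
//     EmitStmt(Body);          // Body: the sub-statement being emitted (assumed)
//   }                          // ~RunCleanupsScope() emits pending cleanups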
753 
754  // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
755  EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
756  EHScopeStack::stable_end();
757 
758  class LexicalScope : public RunCleanupsScope {
759  SourceRange Range;
760  SmallVector<const LabelDecl*, 4> Labels;
761  LexicalScope *ParentScope;
762 
763  LexicalScope(const LexicalScope &) = delete;
764  void operator=(const LexicalScope &) = delete;
765 
766  public:
767  /// Enter a new cleanup scope.
768  explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
769  : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
770  CGF.CurLexicalScope = this;
771  if (CGDebugInfo *DI = CGF.getDebugInfo())
772  DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
773  }
774 
775  void addLabel(const LabelDecl *label) {
776  assert(PerformCleanup && "adding label to dead scope?");
777  Labels.push_back(label);
778  }
779 
780  /// Exit this cleanup scope, emitting any accumulated
781  /// cleanups.
782  ~LexicalScope() {
783  if (CGDebugInfo *DI = CGF.getDebugInfo())
784  DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
785 
786  // If we should perform a cleanup, force them now. Note that
787  // this ends the cleanup scope before rescoping any labels.
788  if (PerformCleanup) {
789  ApplyDebugLocation DL(CGF, Range.getEnd());
790  ForceCleanup();
791  }
792  }
793 
794  /// Force the emission of cleanups now, instead of waiting
795  /// until this object is destroyed.
796  void ForceCleanup() {
797  CGF.CurLexicalScope = ParentScope;
798  RunCleanupsScope::ForceCleanup();
799 
800  if (!Labels.empty())
801  rescopeLabels();
802  }
803 
804  bool hasLabels() const {
805  return !Labels.empty();
806  }
807 
808  void rescopeLabels();
809  };
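// Usage sketch: compound statements are emitted inside a LexicalScope, which
// runs cleanups (via the RunCleanupsScope base) and also opens/closes a
// lexical block in the debug info. Roughly what EmitCompoundStmt does:
//
//   LexicalScope Scope(*this, S.getSourceRange());   // S: a CompoundStmt
//   for (const Stmt *Child : S.body())
//     EmitStmt(Child);
//   // debug scope is closed and cleanups run when Scope is destroyed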
810 
811  typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
812 
813  /// A helper class used to temporarily assign new addresses to some variables.
814  class OMPMapVars {
815  DeclMapTy SavedLocals;
816  DeclMapTy SavedTempAddresses;
817  OMPMapVars(const OMPMapVars &) = delete;
818  void operator=(const OMPMapVars &) = delete;
819 
820  public:
821  explicit OMPMapVars() = default;
822  ~OMPMapVars() {
823  assert(SavedLocals.empty() && "Did not restore original addresses.");
824  };
825 
826  /// Sets the address of the variable \p LocalVD to be \p TempAddr in
827  /// function \p CGF.
828  /// \return true if the variable was newly mapped, false if it had already been mapped.
829  bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
830  Address TempAddr) {
831  LocalVD = LocalVD->getCanonicalDecl();
832  // Only save it once.
833  if (SavedLocals.count(LocalVD)) return false;
834 
835  // Copy the existing local entry to SavedLocals.
836  auto it = CGF.LocalDeclMap.find(LocalVD);
837  if (it != CGF.LocalDeclMap.end())
838  SavedLocals.try_emplace(LocalVD, it->second);
839  else
840  SavedLocals.try_emplace(LocalVD, Address::invalid());
841 
842  // Generate the private entry.
843  QualType VarTy = LocalVD->getType();
844  if (VarTy->isReferenceType()) {
845  Address Temp = CGF.CreateMemTemp(VarTy);
846  CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
847  TempAddr = Temp;
848  }
849  SavedTempAddresses.try_emplace(LocalVD, TempAddr);
850 
851  return true;
852  }
853 
854  /// Applies new addresses to the list of the variables.
855  /// \return true if at least one variable is using a new address, false
856  /// otherwise.
857  bool apply(CodeGenFunction &CGF) {
858  copyInto(SavedTempAddresses, CGF.LocalDeclMap);
859  SavedTempAddresses.clear();
860  return !SavedLocals.empty();
861  }
862 
863  /// Restores original addresses of the variables.
864  void restore(CodeGenFunction &CGF) {
865  if (!SavedLocals.empty()) {
866  copyInto(SavedLocals, CGF.LocalDeclMap);
867  SavedLocals.clear();
868  }
869  }
870 
871  private:
872  /// Copy all the entries in the source map into the destination map; an
873  /// invalid source entry erases the corresponding destination entry instead.
874  static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
875  for (auto &Pair : Src) {
876  if (!Pair.second.isValid()) {
877  Dest.erase(Pair.first);
878  continue;
879  }
880 
881  auto I = Dest.find(Pair.first);
882  if (I != Dest.end())
883  I->second = Pair.second;
884  else
885  Dest.insert(Pair);
886  }
887  }
888  };
889 
890  /// The scope used to remap some variables as private in the OpenMP loop body
891  /// (or other captured region emitted without outlining), and to restore
892  /// the old variables on exit.
893  class OMPPrivateScope : public RunCleanupsScope {
894  OMPMapVars MappedVars;
895  OMPPrivateScope(const OMPPrivateScope &) = delete;
896  void operator=(const OMPPrivateScope &) = delete;
897 
898  public:
899  /// Enter a new OpenMP private scope.
900  explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
901 
902  /// Registers \p LocalVD variable as a private and apply \p PrivateGen
903  /// function for it to generate corresponding private variable. \p
904  /// PrivateGen returns an address of the generated private variable.
905  /// \return true if the variable is registered as private, false if it has
906  /// been privatized already.
907  bool addPrivate(const VarDecl *LocalVD,
908  const llvm::function_ref<Address()> PrivateGen) {
909  assert(PerformCleanup && "adding private to dead scope");
910  return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
911  }
912 
913  /// Privatizes local variables previously registered as private.
914  /// Registration is separate from the actual privatization to allow
915  /// initializers to use the values of the original variables, not the
916  /// private copies. This is important, for example, if the private variable
917  /// is of class type and is initialized by a constructor that references
918  /// other private variables: at initialization time the original variables
919  /// must be used, not the private copies.
920  /// \return true if at least one variable was privatized, false otherwise.
921  bool Privatize() { return MappedVars.apply(CGF); }
922 
923  void ForceCleanup() {
924  RunCleanupsScope::ForceCleanup();
925  MappedVars.restore(CGF);
926  }
927 
928  /// Exit scope - all the mapped variables are restored.
929  ~OMPPrivateScope() {
930  if (PerformCleanup)
931  ForceCleanup();
932  }
933 
934  /// Checks whether the given global variable is captured in the current function.
935  bool isGlobalVarCaptured(const VarDecl *VD) const {
936  VD = VD->getCanonicalDecl();
937  return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
938  }
939  };
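// Usage sketch (VD is an assumed VarDecl*): privatization happens in two
// phases, registration and then a single Privatize() call, so that the
// initializers of the private copies can still see the original variables:
//
//   OMPPrivateScope PrivateScope(CGF);
//   PrivateScope.addPrivate(VD, [&CGF, VD]() -> Address {
//     return CGF.CreateMemTemp(VD->getType());   // the private copy
//   });
//   (void)PrivateScope.Privatize();
//   // ...emit the region body; uses of VD now see the remapped address...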
940 
941  /// Takes the old cleanup stack size and emits the cleanup blocks
942  /// that have been added.
943  void
944  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
945  std::initializer_list<llvm::Value **> ValuesToReload = {});
946 
947  /// Takes the old cleanup stack size and emits the cleanup blocks
948  /// that have been added, then adds all lifetime-extended cleanups from
949  /// the given position to the stack.
950  void
951  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
952  size_t OldLifetimeExtendedStackSize,
953  std::initializer_list<llvm::Value **> ValuesToReload = {});
954 
955  void ResolveBranchFixups(llvm::BasicBlock *Target);
956 
957  /// The given basic block lies in the current EH scope, but may be a
958  /// target of a potentially scope-crossing jump; get a stable handle
959  /// to which we can perform this jump later.
960  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
961  return JumpDest(Target,
962  EHStack.getInnermostNormalCleanup(),
963  NextCleanupDestIndex++);
964  }
965 
966  /// The given basic block lies in the current EH scope, but may be a
967  /// target of a potentially scope-crossing jump; get a stable handle
968  /// to which we can perform this jump later.
969  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
970  return getJumpDestInCurrentScope(createBasicBlock(Name));
971  }
972 
973  /// EmitBranchThroughCleanup - Emit a branch from the current insert
974  /// block through the normal cleanup handling code (if any) and then
975  /// on to \arg Dest.
976  void EmitBranchThroughCleanup(JumpDest Dest);
977 
978  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
979  /// specified destination obviously has no cleanups to run. 'false' is always
980  /// a conservatively correct answer for this method.
981  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
982 
983  /// popCatchScope - Pops the catch scope at the top of the EHScope
984  /// stack, emitting any required code (other than the catch handlers
985  /// themselves).
986  void popCatchScope();
987 
988  llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
989  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
990  llvm::BasicBlock *
991  getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
992 
993  /// An object to manage conditionally-evaluated expressions.
994  class ConditionalEvaluation {
995  llvm::BasicBlock *StartBB;
996 
997  public:
998  ConditionalEvaluation(CodeGenFunction &CGF)
999  : StartBB(CGF.Builder.GetInsertBlock()) {}
1000 
1001  void begin(CodeGenFunction &CGF) {
1002  assert(CGF.OutermostConditional != this);
1003  if (!CGF.OutermostConditional)
1004  CGF.OutermostConditional = this;
1005  }
1006 
1007  void end(CodeGenFunction &CGF) {
1008  assert(CGF.OutermostConditional != nullptr);
1009  if (CGF.OutermostConditional == this)
1010  CGF.OutermostConditional = nullptr;
1011  }
1012 
1013  /// Returns a block which will be executed prior to each
1014  /// evaluation of the conditional code.
1015  llvm::BasicBlock *getStartingBlock() const {
1016  return StartBB;
1017  }
1018  };
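// Usage sketch: the emitter of a conditional expression wraps each
// conditionally-executed arm in begin()/end() so that cleanups pushed while
// emitting that arm know they are conditional (see isInConditionalBranch()).
// Assuming E is the conditional operator being emitted and
// LHSBlock/RHSBlock/ContBlock were made with createBasicBlock():
//
//   ConditionalEvaluation Eval(*this);
//   EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, /*TrueCount=*/0);
//   EmitBlock(LHSBlock);
//   Eval.begin(*this);
//   // ...emit the true arm...
//   Eval.end(*this);
//   EmitBranch(ContBlock);   // the false arm is handled symmetrically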
1019 
1020  /// isInConditionalBranch - Return true if we're currently emitting
1021  /// one branch or the other of a conditional expression.
1022  bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1023 
1024  void setBeforeOutermostConditional(llvm::Value *value, Address addr) {
1025  assert(isInConditionalBranch());
1026  llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
1027  auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
1028  store->setAlignment(addr.getAlignment().getQuantity());
1029  }
1030 
1031  /// An RAII object to record that we're evaluating a statement
1032  /// expression.
1033  class StmtExprEvaluation {
1034  CodeGenFunction &CGF;
1035 
1036  /// We have to save the outermost conditional: cleanups in a
1037  /// statement expression aren't conditional just because the
1038  /// StmtExpr is.
1039  ConditionalEvaluation *SavedOutermostConditional;
1040 
1041  public:
1042  StmtExprEvaluation(CodeGenFunction &CGF)
1043  : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1044  CGF.OutermostConditional = nullptr;
1045  }
1046 
1047  ~StmtExprEvaluation() {
1048  CGF.OutermostConditional = SavedOutermostConditional;
1049  CGF.EnsureInsertPoint();
1050  }
1051  };
1052 
1053  /// An object which temporarily prevents a value from being
1054  /// destroyed by aggressive peephole optimizations that assume that
1055  /// all uses of a value have been realized in the IR.
1056  class PeepholeProtection {
1057  llvm::Instruction *Inst;
1058  friend class CodeGenFunction;
1059 
1060  public:
1061  PeepholeProtection() : Inst(nullptr) {}
1062  };
1063 
1064  /// A non-RAII class containing all the information about a bound
1065  /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1066  /// this which makes individual mappings very simple; using this
1067  /// class directly is useful when you have a variable number of
1068  /// opaque values or don't want the RAII functionality for some
1069  /// reason.
1070  class OpaqueValueMappingData {
1071  const OpaqueValueExpr *OpaqueValue;
1072  bool BoundLValue;
1073  CodeGenFunction::PeepholeProtection Protection;
1074 
1075  OpaqueValueMappingData(const OpaqueValueExpr *ov,
1076  bool boundLValue)
1077  : OpaqueValue(ov), BoundLValue(boundLValue) {}
1078  public:
1079  OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1080 
1081  static bool shouldBindAsLValue(const Expr *expr) {
1082  // gl-values should be bound as l-values for obvious reasons.
1083  // Records should be bound as l-values because IR generation
1084  // always keeps them in memory. Expressions of function type
1085  // act exactly like l-values but are formally required to be
1086  // r-values in C.
1087  return expr->isGLValue() ||
1088  expr->getType()->isFunctionType() ||
1089  hasAggregateEvaluationKind(expr->getType());
1090  }
1091 
1092  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1093  const OpaqueValueExpr *ov,
1094  const Expr *e) {
1095  if (shouldBindAsLValue(ov))
1096  return bind(CGF, ov, CGF.EmitLValue(e));
1097  return bind(CGF, ov, CGF.EmitAnyExpr(e));
1098  }
1099 
1100  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1101  const OpaqueValueExpr *ov,
1102  const LValue &lv) {
1103  assert(shouldBindAsLValue(ov));
1104  CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1105  return OpaqueValueMappingData(ov, true);
1106  }
1107 
1108  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1109  const OpaqueValueExpr *ov,
1110  const RValue &rv) {
1111  assert(!shouldBindAsLValue(ov));
1112  CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1113 
1114  OpaqueValueMappingData data(ov, false);
1115 
1116  // Work around an extremely aggressive peephole optimization in
1117  // EmitScalarConversion which assumes that all other uses of a
1118  // value are extant.
1119  data.Protection = CGF.protectFromPeepholes(rv);
1120 
1121  return data;
1122  }
1123 
1124  bool isValid() const { return OpaqueValue != nullptr; }
1125  void clear() { OpaqueValue = nullptr; }
1126 
1127  void unbind(CodeGenFunction &CGF) {
1128  assert(OpaqueValue && "no data to unbind!");
1129 
1130  if (BoundLValue) {
1131  CGF.OpaqueLValues.erase(OpaqueValue);
1132  } else {
1133  CGF.OpaqueRValues.erase(OpaqueValue);
1134  CGF.unprotectFromPeepholes(Protection);
1135  }
1136  }
1137  };
1138 
1139  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1140  class OpaqueValueMapping {
1141  CodeGenFunction &CGF;
1142  OpaqueValueMappingData Data;
1143 
1144  public:
1145  static bool shouldBindAsLValue(const Expr *expr) {
1146  return OpaqueValueMappingData::shouldBindAsLValue(expr);
1147  }
1148 
1149  /// Build the opaque value mapping for the given conditional
1150  /// operator if it's the GNU ?: extension. This is a common
1151  /// enough pattern that the convenience operator is really
1152  /// helpful.
1153  ///
1154  OpaqueValueMapping(CodeGenFunction &CGF,
1155  const AbstractConditionalOperator *op) : CGF(CGF) {
1156  if (isa<ConditionalOperator>(op))
1157  // Leave Data empty.
1158  return;
1159 
1160  const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1161  Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1162  e->getCommon());
1163  }
1164 
1165  /// Build the opaque value mapping for an OpaqueValueExpr whose source
1166  /// expression is set to the expression the OVE represents.
1167  OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
1168  : CGF(CGF) {
1169  if (OV) {
1170  assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1171  "for OVE with no source expression");
1172  Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
1173  }
1174  }
1175 
1176  OpaqueValueMapping(CodeGenFunction &CGF,
1177  const OpaqueValueExpr *opaqueValue,
1178  LValue lvalue)
1179  : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1180  }
1181 
1182  OpaqueValueMapping(CodeGenFunction &CGF,
1183  const OpaqueValueExpr *opaqueValue,
1184  RValue rvalue)
1185  : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1186  }
1187 
1188  void pop() {
1189  Data.unbind(CGF);
1190  Data.clear();
1191  }
1192 
1193  ~OpaqueValueMapping() {
1194  if (Data.isValid()) Data.unbind(CGF);
1195  }
1196  };
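// Usage sketch (OVE and SourceExpr are assumed names): while emitting an
// expression that contains OpaqueValueExprs (e.g. a PseudoObjectExpr, or the
// GNU binary ?: handled by the constructor above), each OVE is bound for the
// duration of the emission:
//
//   OpaqueValueMapping Binding(CGF, OVE, CGF.EmitLValue(SourceExpr));
//   // ...emitting any expression that refers to OVE now finds the bound
//   //    l-value; the mapping is removed in ~OpaqueValueMapping()...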
1197 
1198 private:
1199  CGDebugInfo *DebugInfo;
1200  bool DisableDebugInfo = false;
1201 
1202  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1203  /// calling llvm.stacksave for multiple VLAs in the same scope.
1204  bool DidCallStackSave = false;
1205 
1206  /// IndirectBranch - The first time an indirect goto is seen we create a block
1207  /// with an indirect branch. Every time we see the address of a label taken,
1208  /// we add the label to the indirect goto. Every subsequent indirect goto is
1209  /// codegen'd as a jump to the IndirectBranch's basic block.
1210  llvm::IndirectBrInst *IndirectBranch = nullptr;
1211 
1212  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1213  /// decls.
1214  DeclMapTy LocalDeclMap;
1215 
1216  // Keep track of the cleanups for callee-destructed parameters pushed to the
1217  // cleanup stack so that they can be deactivated later.
1218  llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1219  CalleeDestructedParamCleanups;
1220 
1221  /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1222  /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1223  /// parameter.
1224  llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1225  SizeArguments;
1226 
1227  /// Track escaped local variables with auto storage. Used during SEH
1228  /// outlining to produce a call to llvm.localescape.
1229  llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1230 
1231  /// LabelMap - This keeps track of the LLVM basic block for each C label.
1232  llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1233 
1234  // BreakContinueStack - This keeps track of where break and continue
1235  // statements should jump to.
1236  struct BreakContinue {
1237  BreakContinue(JumpDest Break, JumpDest Continue)
1238  : BreakBlock(Break), ContinueBlock(Continue) {}
1239 
1240  JumpDest BreakBlock;
1241  JumpDest ContinueBlock;
1242  };
1243  SmallVector<BreakContinue, 8> BreakContinueStack;
1244 
1245  /// Handles cancellation exit points in OpenMP-related constructs.
1246  class OpenMPCancelExitStack {
1247  /// Tracks cancellation exit point and join point for cancel-related exit
1248  /// and normal exit.
1249  struct CancelExit {
1250  CancelExit() = default;
1251  CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1252  JumpDest ContBlock)
1253  : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1254  OpenMPDirectiveKind Kind = OMPD_unknown;
1255  /// true if the exit block has been emitted already by the special
1256  /// emitExit() call, false if the default codegen is used.
1257  bool HasBeenEmitted = false;
1258  JumpDest ExitBlock;
1259  JumpDest ContBlock;
1260  };
1261 
1262  SmallVector<CancelExit, 8> Stack;
1263 
1264  public:
1265  OpenMPCancelExitStack() : Stack(1) {}
1266  ~OpenMPCancelExitStack() = default;
1267  /// Fetches the exit block for the current OpenMP construct.
1268  JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1269  /// Emits the exit block with a codegen procedure specific to the related
1270  /// OpenMP construct, plus the code for normal construct cleanup.
1271  void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1272  const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1273  if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1274  assert(CGF.getOMPCancelDestination(Kind).isValid());
1275  assert(CGF.HaveInsertPoint());
1276  assert(!Stack.back().HasBeenEmitted);
1277  auto IP = CGF.Builder.saveAndClearIP();
1278  CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1279  CodeGen(CGF);
1280  CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1281  CGF.Builder.restoreIP(IP);
1282  Stack.back().HasBeenEmitted = true;
1283  }
1284  CodeGen(CGF);
1285  }
1286  /// Enter the cancel supporting \a Kind construct.
1287  /// \param Kind OpenMP directive that supports cancel constructs.
1288  /// \param HasCancel true, if the construct has inner cancel directive,
1289  /// false otherwise.
1290  void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
1291  Stack.push_back({Kind,
1292  HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
1293  : JumpDest(),
1294  HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
1295  : JumpDest()});
1296  }
1297  /// Emits the default exit point for the cancel construct (if the special
1298  /// one has not been used) + the join point for cancel/normal exits.
1299  void exit(CodeGenFunction &CGF) {
1300  if (getExitBlock().isValid()) {
1301  assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
1302  bool HaveIP = CGF.HaveInsertPoint();
1303  if (!Stack.back().HasBeenEmitted) {
1304  if (HaveIP)
1305  CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1306  CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1307  CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1308  }
1309  CGF.EmitBlock(Stack.back().ContBlock.getBlock());
1310  if (!HaveIP) {
1311  CGF.Builder.CreateUnreachable();
1312  CGF.Builder.ClearInsertionPoint();
1313  }
1314  }
1315  Stack.pop_back();
1316  }
1317  };
1318  OpenMPCancelExitStack OMPCancelStack;
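// Usage sketch (from inside a CodeGenFunction member): emitters for
// cancellable OpenMP regions bracket the region with enter()/exit(); a
// 'cancel' inside the region branches to getExitBlock(), and exit() emits the
// default exit and the join point:
//
//   OMPCancelStack.enter(*this, OMPD_parallel, /*HasCancel=*/true);
//   // ...emit the region body...
//   OMPCancelStack.exit(*this);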
1319 
1320  CodeGenPGO PGO;
1321 
1322  /// Calculate branch weights appropriate for PGO data
1323  llvm::MDNode *createProfileWeights(uint64_t TrueCount, uint64_t FalseCount);
1324  llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights);
1325  llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1326  uint64_t LoopCount);
1327 
1328 public:
1329  /// Increment the profiler's counter for the given statement by \p StepV.
1330  /// If \p StepV is null, the default increment is 1.
1331  void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1332  if (CGM.getCodeGenOpts().hasProfileClangInstr())
1333  PGO.emitCounterIncrement(Builder, S, StepV);
1334  PGO.setCurrentStmt(S);
1335  }
1336 
1337  /// Get the profiler's count for the given statement.
1338  uint64_t getProfileCount(const Stmt *S) {
1339  Optional<uint64_t> Count = PGO.getStmtCount(S);
1340  if (!Count.hasValue())
1341  return 0;
1342  return *Count;
1343  }
1344 
1345  /// Set the profiler's current count.
1346  void setCurrentProfileCount(uint64_t Count) {
1347  PGO.setCurrentRegionCount(Count);
1348  }
1349 
1350  /// Get the profiler's current count. This is generally the count for the most
1351  /// recently incremented counter.
1352  uint64_t getCurrentProfileCount() {
1353  return PGO.getCurrentRegionCount();
1354  }
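// Usage sketch: statement emitters combine these hooks with
// createProfileWeights() to attach branch weights. Roughly what the 'if'
// emitter does (ThenBlock/ElseBlock made with createBasicBlock()):
//
//   uint64_t ThenCount = getProfileCount(S.getThen());   // S: an IfStmt
//   EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount);
//   EmitBlock(ThenBlock);
//   incrementProfileCounter(&S);
//   EmitStmt(S.getThen());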
1355 
1356 private:
1357 
1358  /// SwitchInsn - This is the nearest enclosing switch instruction. It is
1359  /// null if the current context is not in a switch.
1360  llvm::SwitchInst *SwitchInsn = nullptr;
1361  /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1362  SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1363 
1364  /// CaseRangeBlock - This block holds the if-condition check for the last
1365  /// case statement range in the current switch instruction.
1366  llvm::BasicBlock *CaseRangeBlock = nullptr;
1367 
1368  /// OpaqueLValues - Keeps track of the current set of opaque value
1369  /// expressions.
1370  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1371  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1372 
1373  // VLASizeMap - This keeps track of the associated size for each VLA type.
1374  // We track this by the size expression rather than the type itself because
1375  // in certain situations, like a const qualifier applied to a VLA typedef,
1376  // multiple VLA types can share the same size expression.
1377  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1378  // enter/leave scopes.
1379  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1380 
1381  /// A block containing a single 'unreachable' instruction. Created
1382  /// lazily by getUnreachableBlock().
1383  llvm::BasicBlock *UnreachableBlock = nullptr;
1384 
1385  /// Count of the number of return expressions in the function.
1386  unsigned NumReturnExprs = 0;
1387 
1388  /// Count the number of simple (constant) return expressions in the function.
1389  unsigned NumSimpleReturnExprs = 0;
1390 
1391  /// The last regular (non-return) debug location (breakpoint) in the function.
1392  SourceLocation LastStopPoint;
1393 
1394 public:
1395  /// A scope within which we are constructing the fields of an object which
1396  /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1397  /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1398  class FieldConstructionScope {
1399  public:
1400  FieldConstructionScope(CodeGenFunction &CGF, Address This)
1401  : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1402  CGF.CXXDefaultInitExprThis = This;
1403  }
1404  ~FieldConstructionScope() {
1405  CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1406  }
1407 
1408  private:
1409  CodeGenFunction &CGF;
1410  Address OldCXXDefaultInitExprThis;
1411  };
1412 
1413  /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1414  /// is overridden to be the object under construction.
1415  class CXXDefaultInitExprScope {
1416  public:
1417  CXXDefaultInitExprScope(CodeGenFunction &CGF)
1418  : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1419  OldCXXThisAlignment(CGF.CXXThisAlignment) {
1420  CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
1421  CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1422  }
1423  ~CXXDefaultInitExprScope() {
1424  CGF.CXXThisValue = OldCXXThisValue;
1425  CGF.CXXThisAlignment = OldCXXThisAlignment;
1426  }
1427 
1428  public:
1429  CodeGenFunction &CGF;
1430  llvm::Value *OldCXXThisValue;
1431  CharUnits OldCXXThisAlignment;
1432  };
1433 
1434  /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1435  /// current loop index is overridden.
1436  class ArrayInitLoopExprScope {
1437  public:
1438  ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1439  : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1440  CGF.ArrayInitIndex = Index;
1441  }
1442  ~ArrayInitLoopExprScope() {
1443  CGF.ArrayInitIndex = OldArrayInitIndex;
1444  }
1445 
1446  private:
1447  CodeGenFunction &CGF;
1448  llvm::Value *OldArrayInitIndex;
1449  };
1450 
1451  class InlinedInheritingConstructorScope {
1452  public:
1453  InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
1454  : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1455  OldCurCodeDecl(CGF.CurCodeDecl),
1456  OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1457  OldCXXABIThisValue(CGF.CXXABIThisValue),
1458  OldCXXThisValue(CGF.CXXThisValue),
1459  OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1460  OldCXXThisAlignment(CGF.CXXThisAlignment),
1461  OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1462  OldCXXInheritedCtorInitExprArgs(
1463  std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1464  CGF.CurGD = GD;
1465  CGF.CurFuncDecl = CGF.CurCodeDecl =
1466  cast<CXXConstructorDecl>(GD.getDecl());
1467  CGF.CXXABIThisDecl = nullptr;
1468  CGF.CXXABIThisValue = nullptr;
1469  CGF.CXXThisValue = nullptr;
1470  CGF.CXXABIThisAlignment = CharUnits();
1471  CGF.CXXThisAlignment = CharUnits();
1472  CGF.ReturnValue = Address::invalid();
1473  CGF.FnRetTy = QualType();
1474  CGF.CXXInheritedCtorInitExprArgs.clear();
1475  }
1476  ~InlinedInheritingConstructorScope() {
1477  CGF.CurGD = OldCurGD;
1478  CGF.CurFuncDecl = OldCurFuncDecl;
1479  CGF.CurCodeDecl = OldCurCodeDecl;
1480  CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1481  CGF.CXXABIThisValue = OldCXXABIThisValue;
1482  CGF.CXXThisValue = OldCXXThisValue;
1483  CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1484  CGF.CXXThisAlignment = OldCXXThisAlignment;
1485  CGF.ReturnValue = OldReturnValue;
1486  CGF.FnRetTy = OldFnRetTy;
1487  CGF.CXXInheritedCtorInitExprArgs =
1488  std::move(OldCXXInheritedCtorInitExprArgs);
1489  }
1490 
1491  private:
1492  CodeGenFunction &CGF;
1493  GlobalDecl OldCurGD;
1494  const Decl *OldCurFuncDecl;
1495  const Decl *OldCurCodeDecl;
1496  ImplicitParamDecl *OldCXXABIThisDecl;
1497  llvm::Value *OldCXXABIThisValue;
1498  llvm::Value *OldCXXThisValue;
1499  CharUnits OldCXXABIThisAlignment;
1500  CharUnits OldCXXThisAlignment;
1501  Address OldReturnValue;
1502  QualType OldFnRetTy;
1503  CallArgList OldCXXInheritedCtorInitExprArgs;
1504  };
1505 
1506 private:
1507  /// CXXThisDecl - When generating code for a C++ member function,
1508  /// this will hold the implicit 'this' declaration.
1509  ImplicitParamDecl *CXXABIThisDecl = nullptr;
1510  llvm::Value *CXXABIThisValue = nullptr;
1511  llvm::Value *CXXThisValue = nullptr;
1512  CharUnits CXXABIThisAlignment;
1513  CharUnits CXXThisAlignment;
1514 
1515  /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
1516  /// this expression.
1517  Address CXXDefaultInitExprThis = Address::invalid();
1518 
1519  /// The current array initialization index when evaluating an
1520  /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
1521  llvm::Value *ArrayInitIndex = nullptr;
1522 
1523  /// The values of function arguments to use when evaluating
1524  /// CXXInheritedCtorInitExprs within this context.
1525  CallArgList CXXInheritedCtorInitExprArgs;
1526 
1527  /// CXXStructorImplicitParamDecl - When generating code for a constructor or
1528  /// destructor, this will hold the implicit argument (e.g. VTT).
1529  ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
1530  llvm::Value *CXXStructorImplicitParamValue = nullptr;
1531 
1532  /// OutermostConditional - Points to the outermost active
1533  /// conditional control. This is used so that we know if a
1534  /// temporary should be destroyed conditionally.
1535  ConditionalEvaluation *OutermostConditional = nullptr;
1536 
1537  /// The current lexical scope.
1538  LexicalScope *CurLexicalScope = nullptr;
1539 
1540  /// The current source location that should be used for exception
1541  /// handling code.
1542  SourceLocation CurEHLocation;
1543 
1544  /// BlockByrefInfos - For each __block variable, contains
1545  /// information about the layout of the variable.
1546  llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
1547 
1548  /// Used by -fsanitize=nullability-return to determine whether the return
1549  /// value can be checked.
1550  llvm::Value *RetValNullabilityPrecondition = nullptr;
1551 
1552  /// Check if -fsanitize=nullability-return instrumentation is required for
1553  /// this function.
1554  bool requiresReturnValueNullabilityCheck() const {
1555  return RetValNullabilityPrecondition;
1556  }
1557 
1558  /// Used to store precise source locations for return statements by the
1559  /// runtime return value checks.
1560  Address ReturnLocation = Address::invalid();
1561 
1562  /// Check if the return value of this function requires sanitization.
1563  bool requiresReturnValueCheck() const {
1564  return requiresReturnValueNullabilityCheck() ||
1565  (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
1566  CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>());
1567  }
1568 
1569  llvm::BasicBlock *TerminateLandingPad = nullptr;
1570  llvm::BasicBlock *TerminateHandler = nullptr;
1571  llvm::BasicBlock *TrapBB = nullptr;
1572 
1573  /// Terminate funclets keyed by parent funclet pad.
1574  llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
1575 
1576  /// Largest vector width used in this function. Will be used to create a
1577  /// function attribute.
1578  unsigned LargestVectorWidth = 0;
1579 
1580  /// True if we need to emit lifetime markers.
1581  const bool ShouldEmitLifetimeMarkers;
1582 
1583  /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
1584  /// the function metadata.
1585  void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
1586  llvm::Function *Fn);
1587 
1588 public:
1589  CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
1590  ~CodeGenFunction();
1591 
1592  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1593  ASTContext &getContext() const { return CGM.getContext(); }
1594  CGDebugInfo *getDebugInfo() {
1595  if (DisableDebugInfo)
1596  return nullptr;
1597  return DebugInfo;
1598  }
1599  void disableDebugInfo() { DisableDebugInfo = true; }
1600  void enableDebugInfo() { DisableDebugInfo = false; }
1601 
1602  bool shouldUseFusedARCCalls() {
1603  return CGM.getCodeGenOpts().OptimizationLevel == 0;
1604  }
1605 
1606  const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
1607 
1608  /// Returns a pointer to the function's exception object and selector slot,
1609  /// which is assigned in every landing pad.
1610  Address getExceptionSlot();
1611  Address getEHSelectorSlot();
1612 
1613  /// Returns the contents of the function's exception object and selector
1614  /// slots.
1615  llvm::Value *getExceptionFromSlot();
1616  llvm::Value *getSelectorFromSlot();
1617 
1618  Address getNormalCleanupDestSlot();
1619 
1620  llvm::BasicBlock *getUnreachableBlock() {
1621  if (!UnreachableBlock) {
1622  UnreachableBlock = createBasicBlock("unreachable");
1623  new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1624  }
1625  return UnreachableBlock;
1626  }
1627 
1628  llvm::BasicBlock *getInvokeDest() {
1629  if (!EHStack.requiresLandingPad()) return nullptr;
1630  return getInvokeDestImpl();
1631  }
1632 
1633  bool currentFunctionUsesSEHTry() const { return CurSEHParent != nullptr; }
1634 
1635  const TargetInfo &getTarget() const { return Target; }
1636  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1638  return CGM.getTargetCodeGenInfo();
1639  }
1640 
1641  //===--------------------------------------------------------------------===//
1642  // Cleanups
1643  //===--------------------------------------------------------------------===//
1644 
1645  typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
1646 
1647  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1648  Address arrayEndPointer,
1649  QualType elementType,
1650  CharUnits elementAlignment,
1651  Destroyer *destroyer);
1652  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1653  llvm::Value *arrayEnd,
1654  QualType elementType,
1655  CharUnits elementAlignment,
1656  Destroyer *destroyer);
1657 
1658  void pushDestroy(QualType::DestructionKind dtorKind,
1659  Address addr, QualType type);
1660  void pushEHDestroy(QualType::DestructionKind dtorKind,
1661  Address addr, QualType type);
1662  void pushDestroy(CleanupKind kind, Address addr, QualType type,
1663  Destroyer *destroyer, bool useEHCleanupForArray);
1664  void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
1665  QualType type, Destroyer *destroyer,
1666  bool useEHCleanupForArray);
1667  void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1668  llvm::Value *CompletePtr,
1669  QualType ElementType);
1670  void pushStackRestore(CleanupKind kind, Address SPMem);
1671  void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
1672  bool useEHCleanupForArray);
1673  llvm::Function *generateDestroyHelper(Address addr, QualType type,
1674  Destroyer *destroyer,
1675  bool useEHCleanupForArray,
1676  const VarDecl *VD);
1677  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1678  QualType elementType, CharUnits elementAlign,
1679  Destroyer *destroyer,
1680  bool checkZeroLength, bool useEHCleanup);
1681 
1682  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1683 
1684  /// Determines whether an EH cleanup is required to destroy a type
1685  /// with the given destruction kind.
1686  bool needsEHCleanup(QualType::DestructionKind kind) {
1687  switch (kind) {
1688  case QualType::DK_none:
1689  return false;
1690  case QualType::DK_cxx_destructor:
1691  case QualType::DK_objc_weak_lifetime:
1692  case QualType::DK_nontrivial_c_struct:
1693  return getLangOpts().Exceptions;
1694  case QualType::DK_objc_strong_lifetime:
1695  return getLangOpts().Exceptions &&
1696  CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1697  }
1698  llvm_unreachable("bad destruction kind");
1699  }
1700 
1701  CleanupKind getCleanupKind(QualType::DestructionKind kind) {
1702  return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1703  }
1704 
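 // Illustrative usage sketch (hypothetical caller code; CGF, Addr and Ty are
 // assumed to be an in-scope CodeGenFunction &, an Address and a QualType):
 //
 //   if (QualType::DestructionKind DK = Ty.isDestructedType())
 //     CGF.pushDestroy(CGF.getCleanupKind(DK), Addr, Ty,
 //                     CGF.getDestroyer(DK),
 //                     /*useEHCleanupForArray=*/CGF.needsEHCleanup(DK));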
1705  //===--------------------------------------------------------------------===//
1706  // Objective-C
1707  //===--------------------------------------------------------------------===//
1708 
1709  void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1710 
1711  void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
1712 
1713  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1714  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1715  const ObjCPropertyImplDecl *PID);
1716  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1717  const ObjCPropertyImplDecl *propImpl,
1718  const ObjCMethodDecl *GetterMethodDecl,
1719  llvm::Constant *AtomicHelperFn);
1720 
1721  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1722  ObjCMethodDecl *MD, bool ctor);
1723 
1724  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1725  /// for the given property.
1726  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1727  const ObjCPropertyImplDecl *PID);
1728  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1729  const ObjCPropertyImplDecl *propImpl,
1730  llvm::Constant *AtomicHelperFn);
1731 
1732  //===--------------------------------------------------------------------===//
1733  // Block Bits
1734  //===--------------------------------------------------------------------===//
1735 
1736  /// Emit block literal.
1737  /// \return an LLVM value which is a pointer to a struct which contains
1738  /// information about the block, including the block invoke function, the
1739  /// captured variables, etc.
1740  llvm::Value *EmitBlockLiteral(const BlockExpr *);
1741  static void destroyBlockInfos(CGBlockInfo *info);
1742 
1743  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1744  const CGBlockInfo &Info,
1745  const DeclMapTy &ldm,
1746  bool IsLambdaConversionToBlock,
1747  bool BuildGlobalBlock);
1748 
1749  /// Check if \p T is a C++ class that has a destructor that can throw.
1750  static bool cxxDestructorCanThrow(QualType T);
1751 
1752  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1753  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1754  llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1755  const ObjCPropertyImplDecl *PID);
1756  llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1757  const ObjCPropertyImplDecl *PID);
1758  llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1759 
1760  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
1761  bool CanThrow);
1762 
1763  class AutoVarEmission;
1764 
1765  void emitByrefStructureInit(const AutoVarEmission &emission);
1766 
1767  /// Enter a cleanup to destroy a __block variable. Note that this
1768  /// cleanup should be a no-op if the variable hasn't left the stack
1769  /// yet; if a cleanup is required for the variable itself, that needs
1770  /// to be done externally.
1771  ///
1772  /// \param Kind Cleanup kind.
1773  ///
1774  /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
1775  /// structure that will be passed to _Block_object_dispose. When
1776  /// \p LoadBlockVarAddr is true, the address of the field of the block
1777  /// structure that holds the address of the __block structure.
1778  ///
1779  /// \param Flags The flag that will be passed to _Block_object_dispose.
1780  ///
1781  /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
1782  /// \p Addr to get the address of the __block structure.
1783  void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
1784  bool LoadBlockVarAddr, bool CanThrow);
1785 
1786  void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
1787  llvm::Value *ptr);
1788 
1789  Address LoadBlockStruct();
1790  Address GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1791 
1792  /// emitBlockByrefAddress - Computes the location of the
1793  /// data in a variable which is declared as __block.
1794  Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
1795  bool followForward = true);
1796  Address emitBlockByrefAddress(Address baseAddr,
1797  const BlockByrefInfo &info,
1798  bool followForward,
1799  const llvm::Twine &name);
1800 
1801  const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
1802 
1803  QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args);
1804 
1805  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1806  const CGFunctionInfo &FnInfo);
1807 
1808  /// Annotate the function with an attribute that disables TSan checking at
1809  /// runtime.
1810  void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
1811 
1812  /// Emit code for the start of a function.
1813  /// \param Loc The location to be associated with the function.
1814  /// \param StartLoc The location of the function body.
1815  void StartFunction(GlobalDecl GD,
1816  QualType RetTy,
1817  llvm::Function *Fn,
1818  const CGFunctionInfo &FnInfo,
1819  const FunctionArgList &Args,
1820  SourceLocation Loc = SourceLocation(),
1821  SourceLocation StartLoc = SourceLocation());
1822 
1823  static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);
1824 
1825  void EmitConstructorBody(FunctionArgList &Args);
1826  void EmitDestructorBody(FunctionArgList &Args);
1827  void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
1828  void EmitFunctionBody(FunctionArgList &Args, const Stmt *Body);
1829  void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
1830 
1831  void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
1832  CallArgList &CallArgs);
1833  void EmitLambdaBlockInvokeBody();
1834  void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1835  void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
1836  void EmitAsanPrologueOrEpilogue(bool Prologue);
1837 
1838  /// Emit the unified return block, trying to avoid its emission when
1839  /// possible.
1840  /// \return The debug location of the user-written return statement if the
1841  /// return block is avoided.
1842  llvm::DebugLoc EmitReturnBlock();
1843 
1844  /// FinishFunction - Complete IR generation of the current function. It is
1845  /// legal to call this function even if there is no current insertion point.
1846  void FinishFunction(SourceLocation EndLoc=SourceLocation());
1847 
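 // Illustrative emission sequence (hypothetical driver code; GD, RetTy, Fn,
 // FnInfo, Args, Body and the source locations are assumed to exist):
 //
 //   CodeGenFunction CGF(CGM);
 //   CGF.StartFunction(GD, RetTy, Fn, FnInfo, Args, Loc, StartLoc);
 //   CGF.EmitFunctionBody(Args, Body);
 //   CGF.FinishFunction(EndLoc);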
1848  void StartThunk(llvm::Function *Fn, GlobalDecl GD,
1849  const CGFunctionInfo &FnInfo, bool IsUnprototyped);
1850 
1851  void EmitCallAndReturnForThunk(llvm::Constant *Callee, const ThunkInfo *Thunk,
1852  bool IsUnprototyped);
1853 
1854  void FinishThunk();
1855 
1856  /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
1857  void EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr,
1858  llvm::Value *Callee);
1859 
1860  /// Generate a thunk for the given method.
1861  void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1862  GlobalDecl GD, const ThunkInfo &Thunk,
1863  bool IsUnprototyped);
1864 
1865  llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
1866  const CGFunctionInfo &FnInfo,
1867  GlobalDecl GD, const ThunkInfo &Thunk);
1868 
1869  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1870  FunctionArgList &Args);
1871 
1872  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
1873 
1874  /// Struct with all information about dynamic [sub]class needed to set vptr.
1875  struct VPtr {
1876  BaseSubobject Base;
1877  const CXXRecordDecl *NearestVBase;
1878  CharUnits OffsetFromNearestVBase;
1879  const CXXRecordDecl *VTableClass;
1880  };
1881 
1882  /// Initialize the vtable pointer of the given subobject.
1883  void InitializeVTablePointer(const VPtr &vptr);
1884 
1885  typedef SmallVector<VPtr, 4> VPtrsVector;
1886 
1887  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1888  VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);
1889 
1890  void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
1891  CharUnits OffsetFromNearestVBase,
1892  bool BaseIsNonVirtualPrimaryBase,
1893  const CXXRecordDecl *VTableClass,
1894  VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
1895 
1896  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1897 
1898  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1899  /// to by This.
1900  llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy,
1901  const CXXRecordDecl *VTableClass);
1902 
1903  enum CFITypeCheckKind {
1904  CFITCK_VCall,
1905  CFITCK_NVCall,
1906  CFITCK_DerivedCast,
1907  CFITCK_UnrelatedCast,
1908  CFITCK_ICall,
1909  CFITCK_NVMFCall,
1910  CFITCK_VMFCall,
1911  };
1912 
1913  /// Derived is the presumed address of an object of type T after a
1914  /// cast. If T is a polymorphic class type, emit a check that the virtual
1915  /// table for Derived belongs to a class derived from T.
1916  void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
1917  bool MayBeNull, CFITypeCheckKind TCK,
1918  SourceLocation Loc);
1919 
1920  /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
1921  /// If vptr CFI is enabled, emit a check that VTable is valid.
1922  void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
1923  CFITypeCheckKind TCK, SourceLocation Loc);
1924 
1925  /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
1926  /// RD using llvm.type.test.
1927  void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
1928  CFITypeCheckKind TCK, SourceLocation Loc);
1929 
1930  /// If whole-program virtual table optimization is enabled, emit an assumption
1931  /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
1932  /// enabled, emit a check that VTable is a member of RD's type identifier.
1933  void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
1934  llvm::Value *VTable, SourceLocation Loc);
1935 
1936  /// Returns whether we should perform a type checked load when loading a
1937  /// virtual function for virtual calls to members of RD. This is generally
1938  /// true when both vcall CFI and whole-program-vtables are enabled.
1939  bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
1940 
1941  /// Emit a type checked load from the given vtable.
1942  llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable,
1943  uint64_t VTableByteOffset);
1944 
1945  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1946  /// given phase of destruction for a destructor. The end result
1947  /// should call destructors on members and base classes in reverse
1948  /// order of their construction.
1949  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1950 
1951  /// ShouldInstrumentFunction - Return true if the current function should be
1952  /// instrumented with __cyg_profile_func_* calls
1953  bool ShouldInstrumentFunction();
1954 
1955  /// ShouldXRayInstrument - Return true if the current function should be
1956  /// instrumented with XRay nop sleds.
1957  bool ShouldXRayInstrumentFunction() const;
1958 
1959  /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
1960  /// XRay custom event handling calls.
1961  bool AlwaysEmitXRayCustomEvents() const;
1962 
1963  /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
1964  /// XRay typed event handling calls.
1965  bool AlwaysEmitXRayTypedEvents() const;
1966 
1967  /// Encode an address into a form suitable for use in a function prologue.
1968  llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F,
1969  llvm::Constant *Addr);
1970 
1971  /// Decode an address used in a function prologue, encoded by \c
1972  /// EncodeAddrForUseInPrologue.
1973  llvm::Value *DecodeAddrUsedInPrologue(llvm::Value *F,
1974  llvm::Value *EncodedAddr);
1975 
1976  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1977  /// arguments for the given function. This is also responsible for naming the
1978  /// LLVM function arguments.
1979  void EmitFunctionProlog(const CGFunctionInfo &FI,
1980  llvm::Function *Fn,
1981  const FunctionArgList &Args);
1982 
1983  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1984  /// given temporary.
1985  void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
1986  SourceLocation EndLoc);
1987 
1988  /// Emit a test that checks if the return value \p RV is nonnull.
1989  void EmitReturnValueCheck(llvm::Value *RV);
1990 
1991  /// EmitStartEHSpec - Emit the start of the exception spec.
1992  void EmitStartEHSpec(const Decl *D);
1993 
1994  /// EmitEndEHSpec - Emit the end of the exception spec.
1995  void EmitEndEHSpec(const Decl *D);
1996 
1997  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1998  llvm::BasicBlock *getTerminateLandingPad();
1999 
2000  /// getTerminateFunclet - Return a cleanup funclet that just calls
2001  /// terminate.
2002  llvm::BasicBlock *getTerminateFunclet();
2003 
2004  /// getTerminateHandler - Return a handler (not a landing pad, just
2005  /// a catch handler) that just calls terminate. This is used when
2006  /// a terminate scope encloses a try.
2007  llvm::BasicBlock *getTerminateHandler();
2008 
2009  llvm::Type *ConvertTypeForMem(QualType T);
2010  llvm::Type *ConvertType(QualType T);
2011  llvm::Type *ConvertType(const TypeDecl *T) {
2012  return ConvertType(getContext().getTypeDeclType(T));
2013  }
2014 
2015  /// LoadObjCSelf - Load the value of self. This function is only valid while
2016  /// generating code for an Objective-C method.
2017  llvm::Value *LoadObjCSelf();
2018 
2019  /// TypeOfSelfObject - Return type of object that this self represents.
2020  QualType TypeOfSelfObject();
2021 
2022  /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2023  static TypeEvaluationKind getEvaluationKind(QualType T);
2024 
2026  return getEvaluationKind(T) == TEK_Scalar;
2027  }
2028 
2029  static bool hasAggregateEvaluationKind(QualType T) {
2030  return getEvaluationKind(T) == TEK_Aggregate;
2031  }
2032 
2033  /// createBasicBlock - Create an LLVM basic block.
2034  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
2035  llvm::Function *parent = nullptr,
2036  llvm::BasicBlock *before = nullptr) {
2037  return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
2038  }
2039 
2040  /// getJumpDestForLabel - Return the JumpDest that the specified
2041  /// label maps to.
2042  JumpDest getJumpDestForLabel(const LabelDecl *S);
2043 
2044  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2045  /// another basic block, simplify it. This assumes that no other code could
2046  /// potentially reference the basic block.
2047  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2048 
2049  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2050  /// adding a fall-through branch from the current insert block if
2051  /// necessary. It is legal to call this function even if there is no current
2052  /// insertion point.
2053  ///
2054  /// IsFinished - If true, indicates that the caller has finished emitting
2055  /// branches to the given block and does not expect to emit code into it. This
2056  /// means the block can be ignored if it is unreachable.
2057  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
2058 
2059  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2060  /// near its uses, and leave the insertion point in it.
2061  void EmitBlockAfterUses(llvm::BasicBlock *BB);
2062 
2063  /// EmitBranch - Emit a branch to the specified basic block from the current
2064  /// insert block, taking care to avoid creation of branches from dummy
2065  /// blocks. It is legal to call this function even if there is no current
2066  /// insertion point.
2067  ///
2068  /// This function clears the current insertion point. The caller should follow
2069  calls to this function with calls to Emit*Block prior to generating new
2070  /// code.
2071  void EmitBranch(llvm::BasicBlock *Block);
2072 
2073  /// HaveInsertPoint - True if an insertion point is defined. If not, this
2074  /// indicates that the current code being emitted is unreachable.
2075  bool HaveInsertPoint() const {
2076  return Builder.GetInsertBlock() != nullptr;
2077  }
2078 
2079  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2080  /// emitted IR has a place to go. Note that by definition, if this function
2081  /// creates a block then that block is unreachable; callers may do better to
2082  /// detect when no insertion point is defined and simply skip IR generation.
2083  void EnsureInsertPoint() {
2084  if (!HaveInsertPoint())
2085  EmitBlock(createBasicBlock());
2086  }
2087 
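 // Illustrative sketch of the basic-block helpers above (hypothetical caller
 // code; CGF is assumed to be an in-scope CodeGenFunction &):
 //
 //   llvm::BasicBlock *ContBB = CGF.createBasicBlock("cont");
 //   CGF.EmitBranch(ContBB);   // clears the current insertion point
 //   CGF.EmitBlock(ContBB);    // emits ContBB and makes it the insert point
 //   CGF.EnsureInsertPoint();  // no-op here, since an insert point now exists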
2088  /// ErrorUnsupported - Print out an error that codegen doesn't support the
2089  /// specified stmt yet.
2090  void ErrorUnsupported(const Stmt *S, const char *Type);
2091 
2092  //===--------------------------------------------------------------------===//
2093  // Helpers
2094  //===--------------------------------------------------------------------===//
2095 
2096  LValue MakeAddrLValue(Address Addr, QualType T,
2097  AlignmentSource Source = AlignmentSource::Type) {
2098  return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2099  CGM.getTBAAAccessInfo(T));
2100  }
2101 
2102  LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo,
2103  TBAAAccessInfo TBAAInfo) {
2104  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2105  }
2106 
2107  LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2108  AlignmentSource Source = AlignmentSource::Type) {
2109  return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
2110  LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
2111  }
2112 
2113  LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2114  LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
2115  return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
2116  BaseInfo, TBAAInfo);
2117  }
2118 
2119  LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
2120  LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
2121  CharUnits getNaturalTypeAlignment(QualType T,
2122  LValueBaseInfo *BaseInfo = nullptr,
2123  TBAAAccessInfo *TBAAInfo = nullptr,
2124  bool forPointeeType = false);
2125  CharUnits getNaturalPointeeTypeAlignment(QualType T,
2126  LValueBaseInfo *BaseInfo = nullptr,
2127  TBAAAccessInfo *TBAAInfo = nullptr);
2128 
2129  Address EmitLoadOfReference(LValue RefLVal,
2130  LValueBaseInfo *PointeeBaseInfo = nullptr,
2131  TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2132  LValue EmitLoadOfReferenceLValue(LValue RefLVal);
2133  LValue EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy,
2134  AlignmentSource Source =
2135  AlignmentSource::Type) {
2136  LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2137  CGM.getTBAAAccessInfo(RefTy));
2138  return EmitLoadOfReferenceLValue(RefLVal);
2139  }
2140 
2141  Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
2142  LValueBaseInfo *BaseInfo = nullptr,
2143  TBAAAccessInfo *TBAAInfo = nullptr);
2144  LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
2145 
2146  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2147  /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2148  /// insertion point of the builder. The caller is responsible for setting an
2149  /// appropriate alignment on
2150  /// the alloca.
2151  ///
2152  /// \p ArraySize is the number of array elements to be allocated if it
2153  /// is not nullptr.
2154  ///
2155  /// LangAS::Default is the address space of pointers to local variables and
2156  /// temporaries, as exposed in the source language. In certain
2157  /// configurations, this is not the same as the alloca address space, and a
2158  /// cast is needed to lift the pointer from the alloca AS into
2159  /// LangAS::Default. This can happen when the target uses a restricted
2160  /// address space for the stack but the source language requires
2161  /// LangAS::Default to be a generic address space. The latter condition is
2162  /// common for most programming languages; OpenCL is an exception in that
2163  /// LangAS::Default is the private address space, which naturally maps
2164  /// to the stack.
2165  ///
2166  /// Because the address of a temporary is often exposed to the program in
2167  /// various ways, this function will perform the cast. The original alloca
2168  /// instruction is returned through \p Alloca if it is not nullptr.
2169  ///
2170  /// The cast is not performed in CreateTempAllocaWithoutCast. This is
2171  /// more efficient if the caller knows that the address will not be exposed.
2172  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2173  llvm::Value *ArraySize = nullptr);
2174  Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
2175  const Twine &Name = "tmp",
2176  llvm::Value *ArraySize = nullptr,
2177  Address *Alloca = nullptr);
2178  Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
2179  const Twine &Name = "tmp",
2180  llvm::Value *ArraySize = nullptr);
2181 
2182  /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2183  /// default ABI alignment of the given LLVM type.
2184  ///
2185  /// IMPORTANT NOTE: This is *not* generally the right alignment for
2186  /// any given AST type that happens to have been lowered to the
2187  /// given IR type. This should only ever be used for function-local,
2188  /// IR-driven manipulations like saving and restoring a value. Do
2189  /// not hand this address off to arbitrary IRGen routines, and especially
2190  /// do not pass it as an argument to a function that might expect a
2191  /// properly ABI-aligned value.
2192  Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
2193  const Twine &Name = "tmp");
2194 
2195  /// InitTempAlloca - Provide an initial value for the given alloca which
2196  /// will be observable at all locations in the function.
2197  ///
2198  /// The address should be something that was returned from one of
2199  /// the CreateTempAlloca or CreateMemTemp routines, and the
2200  /// initializer must be valid in the entry block (i.e. it must
2201  /// either be a constant or an argument value).
2202  void InitTempAlloca(Address Alloca, llvm::Value *Value);
2203 
2204  /// CreateIRTemp - Create a temporary IR object of the given type, with
2205  /// appropriate alignment. This routine should only be used when a temporary
2206  /// value needs to be stored into an alloca (for example, to avoid explicit
2207  /// PHI construction), but the type is the IR type, not the type appropriate
2208  /// for storing in memory.
2209  ///
2210  /// That is, this is exactly equivalent to CreateMemTemp, but calling
2211  /// ConvertType instead of ConvertTypeForMem.
2212  Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
2213 
2214  /// CreateMemTemp - Create a temporary memory object of the given type, with
2215  /// appropriate alignment and cast it to the default address space. Returns
2216  /// the original alloca instruction through \p Alloca if it is not nullptr.
2217  Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
2218  Address *Alloca = nullptr);
2219  Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
2220  Address *Alloca = nullptr);
2221 
2222  /// CreateMemTemp - Create a temporary memory object of the given type, with
2223  /// appropriate alignment without casting it to the default address space.
2224  Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2225  Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
2226  const Twine &Name = "tmp");
2227 
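 // Illustrative sketch (hypothetical caller code; CGF and Ty are assumed to be
 // an in-scope CodeGenFunction & and a QualType):
 //
 //   Address Tmp = CGF.CreateMemTemp(Ty, "tmp");               // cast to LangAS::Default
 //   Address Raw = CGF.CreateMemTempWithoutCast(Ty, "raw.tmp"); // stays in the alloca AS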
2228  /// CreateAggTemp - Create a temporary memory object for the given
2229  /// aggregate type.
2230  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
2231  return AggValueSlot::forAddr(CreateMemTemp(T, Name),
2232  T.getQualifiers(),
2233  AggValueSlot::IsNotDestructed,
2234  AggValueSlot::DoesNotNeedGCBarriers,
2235  AggValueSlot::IsNotAliased,
2236  AggValueSlot::DoesNotOverlap);
2237  }
2238 
2239  /// Emit a cast to void* in the appropriate address space.
2240  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
2241 
2242  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2243  /// expression and compare the result against zero, returning an Int1Ty value.
2244  llvm::Value *EvaluateExprAsBool(const Expr *E);
2245 
2246  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2247  void EmitIgnoredExpr(const Expr *E);
2248 
2249  /// EmitAnyExpr - Emit code to compute the specified expression which can have
2250  /// any type. The result is returned as an RValue struct. If this is an
2251  /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2252  /// the result should be returned.
2253  ///
2254  /// \param ignoreResult True if the resulting value isn't used.
2255  RValue EmitAnyExpr(const Expr *E,
2256  AggValueSlot aggSlot = AggValueSlot::ignored(),
2257  bool ignoreResult = false);
2258 
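 // Illustrative sketch (hypothetical caller code; evaluates an aggregate
 // expression E into a fresh temporary slot, assuming an in-scope
 // CodeGenFunction &CGF):
 //
 //   AggValueSlot Slot = CGF.CreateAggTemp(E->getType(), "agg.tmp");
 //   CGF.EmitAnyExpr(E, Slot);
 //   Address Result = Slot.getAddress();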
2259  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2260  // or the value of the expression, depending on how va_list is defined.
2261  Address EmitVAListRef(const Expr *E);
2262 
2263  /// Emit a "reference" to a __builtin_ms_va_list; this is
2264  /// always the value of the expression, because a __builtin_ms_va_list is a
2265  /// pointer to a char.
2266  Address EmitMSVAListRef(const Expr *E);
2267 
2268  /// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result will
2269  /// always be accessible even if no aggregate location is provided.
2270  RValue EmitAnyExprToTemp(const Expr *E);
2271 
2272  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2273  /// arbitrary expression into the given memory location.
2274  void EmitAnyExprToMem(const Expr *E, Address Location,
2275  Qualifiers Quals, bool IsInitializer);
2276 
2277  void EmitAnyExprToExn(const Expr *E, Address Addr);
2278 
2279  /// EmitExprAsInit - Emits the code necessary to initialize a
2280  /// location in memory with the given initializer.
2281  void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2282  bool capturedByInit);
2283 
2284  /// hasVolatileMember - returns true if aggregate type has a volatile
2285  /// member.
2286  static bool hasVolatileMember(QualType T) {
2287  if (const RecordType *RT = T->getAs<RecordType>()) {
2288  const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
2289  return RD->hasVolatileMember();
2290  }
2291  return false;
2292  }
2293 
2294  /// Determine whether a return value slot may overlap some other object.
2295  AggValueSlot::Overlap_t overlapForReturnValue() {
2296  // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2297  // class subobjects. These cases may need to be revisited depending on the
2298  // resolution of the relevant core issue.
2299  return AggValueSlot::DoesNotOverlap;
2300  }
2301 
2302  /// Determine whether a field initialization may overlap some other object.
2303  AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD) {
2304  // FIXME: These cases can result in overlap as a result of P0840R0's
2305  // [[no_unique_address]] attribute. We can still infer NoOverlap in the
2306  // presence of that attribute if the field is within the nvsize of its
2307  // containing class, because non-virtual subobjects are initialized in
2308  // address order.
2309  return AggValueSlot::DoesNotOverlap;
2310  }
2311 
2312  /// Determine whether a base class initialization may overlap some other
2313  /// object.
2314  AggValueSlot::Overlap_t overlapForBaseInit(const CXXRecordDecl *RD,
2315  const CXXRecordDecl *BaseRD,
2316  bool IsVirtual);
2317 
2318  /// Emit an aggregate assignment.
2319  void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
2320  bool IsVolatile = hasVolatileMember(EltTy);
2321  EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
2322  }
2323 
2324  void EmitAggregateCopyCtor(LValue Dest, LValue Src,
2325  AggValueSlot::Overlap_t MayOverlap) {
2326  EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
2327  }
2328 
2329  /// EmitAggregateCopy - Emit an aggregate copy.
2330  ///
2331  /// \param isVolatile \c true iff either the source or the destination is
2332  /// volatile.
2333  /// \param MayOverlap Whether the tail padding of the destination might be
2334  /// occupied by some other object. More efficient code can often be
2335  /// generated if not.
2336  void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
2337  AggValueSlot::Overlap_t MayOverlap,
2338  bool isVolatile = false);
2339 
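 // Illustrative sketch (hypothetical caller code; DestLV and SrcLV are assumed
 // to be LValues of the same aggregate QualType Ty):
 //
 //   CGF.EmitAggregateCopy(DestLV, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
 //                         /*isVolatile=*/CGF.hasVolatileMember(Ty));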
2340  /// GetAddrOfLocalVar - Return the address of a local variable.
2341  Address GetAddrOfLocalVar(const VarDecl *VD) {
2342  auto it = LocalDeclMap.find(VD);
2343  assert(it != LocalDeclMap.end() &&
2344  "Invalid argument to GetAddrOfLocalVar(), no decl!");
2345  return it->second;
2346  }
2347 
2348  /// Given an opaque value expression, return its LValue mapping if it exists,
2349  /// otherwise create one.
2350  LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
2351 
2352  /// Given an opaque value expression, return its RValue mapping if it exists,
2353  /// otherwise create one.
2354  RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
2355 
2356  /// Get the index of the current ArrayInitLoopExpr, if any.
2357  llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
2358 
2359  /// getAccessedFieldNo - Given an encoded value and a result number, return
2360  /// the input field number being accessed.
2361  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
2362 
2363  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
2364  llvm::BasicBlock *GetIndirectGotoBlock();
2365 
2366  /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
2367  static bool IsWrappedCXXThis(const Expr *E);
2368 
2369  /// EmitNullInitialization - Generate code to set a value of the given type to
2370  /// null. If the type contains data member pointers, they will be initialized
2371  /// to -1 in accordance with the Itanium C++ ABI.
2372  void EmitNullInitialization(Address DestPtr, QualType Ty);
2373 
2374  /// Emits a call to an LLVM variable-argument intrinsic, either
2375  /// \c llvm.va_start or \c llvm.va_end.
2376  /// \param ArgValue A reference to the \c va_list as emitted by either
2377  /// \c EmitVAListRef or \c EmitMSVAListRef.
2378  /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
2379  /// calls \c llvm.va_end.
2380  llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
2381 
2382  /// Generate code to get an argument from the passed in pointer
2383  /// and update it accordingly.
2384  /// \param VE The \c VAArgExpr for which to generate code.
2385  /// \param VAListAddr Receives a reference to the \c va_list as emitted by
2386  /// either \c EmitVAListRef or \c EmitMSVAListRef.
2387  /// \returns A pointer to the argument.
2388  // FIXME: We should be able to get rid of this method and use the va_arg
2389  // instruction in LLVM instead once it works well enough.
2390  Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr);
2391 
2392  /// emitArrayLength - Compute the length of an array, even if it's a
2393  /// VLA, and drill down to the base element type.
2394  llvm::Value *emitArrayLength(const ArrayType *arrayType,
2395  QualType &baseType,
2396  Address &addr);
2397 
2398  /// EmitVLASize - Capture all the sizes for the VLA expressions in
2399  /// the given variably-modified type and store them in the VLASizeMap.
2400  ///
2401  /// This function can be called with a null (unreachable) insert point.
2402  void EmitVariablyModifiedType(QualType Ty);
2403 
2404  struct VlaSizePair {
2405  llvm::Value *NumElts;
2406  QualType Type;
2407 
2408  VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
2409  };
2410 
2411  /// Return the number of elements for a single dimension
2412  /// for the given array type.
2413  VlaSizePair getVLAElements1D(const VariableArrayType *vla);
2414  VlaSizePair getVLAElements1D(QualType vla);
2415 
2416  /// Returns an LLVM value that corresponds to the size,
2417  /// in non-variably-sized elements, of a variable length array type,
2418  /// plus that largest non-variably-sized element type. Assumes that
2419  /// the type has already been emitted with EmitVariablyModifiedType.
2420  VlaSizePair getVLASize(const VariableArrayType *vla);
2421  VlaSizePair getVLASize(QualType vla);
2422 
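 // Illustrative sketch (hypothetical caller code; Ty is assumed to be a
 // variably modified type already passed to EmitVariablyModifiedType):
 //
 //   auto VlaSize = CGF.getVLASize(Ty);
 //   llvm::Value *NumElts = VlaSize.NumElts; // runtime element count
 //   QualType EltTy = VlaSize.Type;          // largest non-VLA element type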
2423  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
2424  /// generating code for an C++ member function.
2425  llvm::Value *LoadCXXThis() {
2426  assert(CXXThisValue && "no 'this' value for this function");
2427  return CXXThisValue;
2428  }
2429  Address LoadCXXThisAddress();
2430 
2431  /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors that have
2432  /// virtual bases.
2433  // FIXME: Every place that calls LoadCXXVTT is something
2434  // that needs to be abstracted properly.
2435  llvm::Value *LoadCXXVTT() {
2436  assert(CXXStructorImplicitParamValue && "no VTT value for this function");
2437  return CXXStructorImplicitParamValue;
2438  }
2439 
2440  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
2441  /// complete class to the given direct base.
2442  Address
2443  GetAddressOfDirectBaseInCompleteClass(Address Value,
2444  const CXXRecordDecl *Derived,
2445  const CXXRecordDecl *Base,
2446  bool BaseIsVirtual);
2447 
2448  static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
2449 
2450  /// GetAddressOfBaseClass - This function will add the necessary delta to the
2451  /// load of 'this' and returns address of the base class.
2452  Address GetAddressOfBaseClass(Address Value,
2453  const CXXRecordDecl *Derived,
2454  CastExpr::path_const_iterator PathBegin,
2455  CastExpr::path_const_iterator PathEnd,
2456  bool NullCheckValue, SourceLocation Loc);
2457 
2458  Address GetAddressOfDerivedClass(Address Value,
2459  const CXXRecordDecl *Derived,
2460  CastExpr::path_const_iterator PathBegin,
2461  CastExpr::path_const_iterator PathEnd,
2462  bool NullCheckValue);
2463 
2464  /// GetVTTParameter - Return the VTT parameter that should be passed to a
2465  /// base constructor/destructor with virtual bases.
2466  /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
2467  /// to ItaniumCXXABI.cpp together with all the references to VTT.
2468  llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
2469  bool Delegating);
2470 
2471  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
2472  CXXCtorType CtorType,
2473  const FunctionArgList &Args,
2474  SourceLocation Loc);
2475  // It's important not to confuse this and the previous function. Delegating
2476  // constructors are the C++0x feature. The constructor delegate optimization
2477  // is used to reduce duplication in the base and complete constructors where
2478  // they are substantially the same.
2479  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
2480  const FunctionArgList &Args);
2481 
2482  /// Emit a call to an inheriting constructor (that is, one that invokes a
2483  /// constructor inherited from a base class) by inlining its definition. This
2484  /// is necessary if the ABI does not support forwarding the arguments to the
2485  /// base class constructor (because they're variadic or similar).
2486  void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor,
2487  CXXCtorType CtorType,
2488  bool ForVirtualBase,
2489  bool Delegating,
2490  CallArgList &Args);
2491 
2492  /// Emit a call to a constructor inherited from a base class, passing the
2493  /// current constructor's arguments along unmodified (without even making
2494  /// a copy).
2495  void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D,
2496  bool ForVirtualBase, Address This,
2497  bool InheritedFromVBase,
2498  const CXXInheritedCtorInitExpr *E);
2499 
2500  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
2501  bool ForVirtualBase, bool Delegating,
2502  Address This, const CXXConstructExpr *E,
2503  AggValueSlot::Overlap_t Overlap,
2504  bool NewPointerIsChecked);
2505 
2506  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
2507  bool ForVirtualBase, bool Delegating,
2508  Address This, CallArgList &Args,
2509  AggValueSlot::Overlap_t Overlap,
2510  SourceLocation Loc,
2511  bool NewPointerIsChecked);
2512 
2513  /// Emit assumption loads for all bases. Must be called only on the
2514  /// most-derived class, and not while the object is under construction.
2515  void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
2516 
2517  /// Emit assumption that vptr load == global vtable.
2518  void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
2519 
2520  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
2521  Address This, Address Src,
2522  const CXXConstructExpr *E);
2523 
2524  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
2525  const ArrayType *ArrayTy,
2526  Address ArrayPtr,
2527  const CXXConstructExpr *E,
2528  bool NewPointerIsChecked,
2529  bool ZeroInitialization = false);
2530 
2531  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
2532  llvm::Value *NumElements,
2533  Address ArrayPtr,
2534  const CXXConstructExpr *E,
2535  bool NewPointerIsChecked,
2536  bool ZeroInitialization = false);
2537 
2538  static Destroyer destroyCXXObject;
2539 
2540  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
2541  bool ForVirtualBase, bool Delegating,
2542  Address This);
2543 
2544  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
2545  llvm::Type *ElementTy, Address NewPtr,
2546  llvm::Value *NumElements,
2547  llvm::Value *AllocSizeWithoutCookie);
2548 
2549  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
2550  Address Ptr);
2551 
2552  llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
2553  void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
2554 
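 // Illustrative sketch (hypothetical caller code; brackets an alloca's lifetime
 // with llvm.lifetime markers, assuming AllocaPtr and its size in bytes):
 //
 //   llvm::Value *Size = CGF.EmitLifetimeStart(SizeInBytes, AllocaPtr);
 //   /* ... emit code that uses the memory ... */
 //   if (Size)
 //     CGF.EmitLifetimeEnd(Size, AllocaPtr);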
2555  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
2556  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
2557 
2558  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
2559  QualType DeleteTy, llvm::Value *NumElements = nullptr,
2560  CharUnits CookieSize = CharUnits());
2561 
2562  RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
2563  const CallExpr *TheCallExpr, bool IsDelete);
2564 
2565  llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
2566  llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
2567  Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
2568 
2569  /// Situations in which we might emit a check for the suitability of a
2570  /// pointer or glvalue.
2571  enum TypeCheckKind {
2572  /// Checking the operand of a load. Must be suitably sized and aligned.
2573  TCK_Load,
2574  /// Checking the destination of a store. Must be suitably sized and aligned.
2575  TCK_Store,
2576  /// Checking the bound value in a reference binding. Must be suitably sized
2577  /// and aligned, but is not required to refer to an object (until the
2578  /// reference is used), per core issue 453.
2579  TCK_ReferenceBinding,
2580  /// Checking the object expression in a non-static data member access. Must
2581  /// be an object within its lifetime.
2582  TCK_MemberAccess,
2583  /// Checking the 'this' pointer for a call to a non-static member function.
2584  /// Must be an object within its lifetime.
2585  TCK_MemberCall,
2586  /// Checking the 'this' pointer for a constructor call.
2587  TCK_ConstructorCall,
2588  /// Checking the operand of a static_cast to a derived pointer type. Must be
2589  /// null or an object within its lifetime.
2590  TCK_DowncastPointer,
2591  /// Checking the operand of a static_cast to a derived reference type. Must
2592  /// be an object within its lifetime.
2593  TCK_DowncastReference,
2594  /// Checking the operand of a cast to a base object. Must be suitably sized
2595  /// and aligned.
2596  TCK_Upcast,
2597  /// Checking the operand of a cast to a virtual base object. Must be an
2598  /// object within its lifetime.
2599  TCK_UpcastToVirtualBase,
2600  /// Checking the value assigned to a _Nonnull pointer. Must not be null.
2601  TCK_NonnullAssign,
2602  /// Checking the operand of a dynamic_cast or a typeid expression. Must be
2603  /// null or an object within its lifetime.
2604  TCK_DynamicOperation
2605  };
2606 
2607  /// Determine whether the pointer type check \p TCK permits null pointers.
2608  static bool isNullPointerAllowed(TypeCheckKind TCK);
2609 
2610  /// Determine whether the pointer type check \p TCK requires a vptr check.
2611  static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);
2612 
2613  /// Whether any type-checking sanitizers are enabled. If \c false,
2614  /// calls to EmitTypeCheck can be skipped.
2615  bool sanitizePerformTypeCheck() const;
2616 
2617  /// Emit a check that \p V is the address of storage of the
2618  /// appropriate size and alignment for an object of type \p Type.
2619  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
2620  QualType Type, CharUnits Alignment = CharUnits::Zero(),
2621  SanitizerSet SkippedChecks = SanitizerSet());
2622 
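 // Illustrative sketch (hypothetical caller code; emits a sanitizer type check
 // before loading through a pointer value Ptr of pointee type Ty at Loc):
 //
 //   if (CGF.sanitizePerformTypeCheck())
 //     CGF.EmitTypeCheck(CodeGenFunction::TCK_Load, Loc, Ptr, Ty);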
2623  /// Emit a check that \p Base points into an array object, which
2624  /// we can access at index \p Index. \p Accessed should be \c false if
2625  /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
2626  void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
2627  QualType IndexType, bool Accessed);
2628 
2629  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2630  bool isInc, bool isPre);
2631  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
2632  bool isInc, bool isPre);
2633 
2634  void EmitAlignmentAssumption(llvm::Value *PtrValue, unsigned Alignment,
2635  llvm::Value *OffsetValue = nullptr) {
2636  Builder.CreateAlignmentAssumption(CGM.getDataLayout(), PtrValue, Alignment,
2637  OffsetValue);
2638  }
2639 
2640  /// Converts Location to a DebugLoc, if debug information is enabled.
2641  llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
2642 
2643 
2644  //===--------------------------------------------------------------------===//
2645  // Declaration Emission
2646  //===--------------------------------------------------------------------===//
2647 
2648  /// EmitDecl - Emit a declaration.
2649  ///
2650  /// This function can be called with a null (unreachable) insert point.
2651  void EmitDecl(const Decl &D);
2652 
2653  /// EmitVarDecl - Emit a local variable declaration.
2654  ///
2655  /// This function can be called with a null (unreachable) insert point.
2656  void EmitVarDecl(const VarDecl &D);
2657 
2658  void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2659  bool capturedByInit);
2660 
2661  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
2662  llvm::Value *Address);
2663 
2664  /// Determine whether the given initializer is trivial in the sense
2665  /// that it requires no code to be generated.
2666  bool isTrivialInitializer(const Expr *Init);
2667 
2668  /// EmitAutoVarDecl - Emit an auto variable declaration.
2669  ///
2670  /// This function can be called with a null (unreachable) insert point.
2671  void EmitAutoVarDecl(const VarDecl &D);
2672 
2673  class AutoVarEmission {
2674  friend class CodeGenFunction;
2675 
2676  const VarDecl *Variable;
2677 
2678  /// The address of the alloca for languages with explicit address space
2679  /// (e.g. OpenCL) or alloca casted to generic pointer for address space
2680  /// agnostic languages (e.g. C++). Invalid if the variable was emitted
2681  /// as a global constant.
2682  Address Addr;
2683 
2684  llvm::Value *NRVOFlag;
2685 
2686  /// True if the variable is a __block variable.
2687  bool IsByRef;
2688 
2689  /// True if the variable is of aggregate type and has a constant
2690  /// initializer.
2691  bool IsConstantAggregate;
2692 
2693  /// Non-null if we should use lifetime annotations.
2694  llvm::Value *SizeForLifetimeMarkers;
2695 
2696  /// Address with original alloca instruction. Invalid if the variable was
2697  /// emitted as a global constant.
2698  Address AllocaAddr;
2699 
2700  struct Invalid {};
2701  AutoVarEmission(Invalid)
2702  : Variable(nullptr), Addr(Address::invalid()),
2703  AllocaAddr(Address::invalid()) {}
2704 
2705  AutoVarEmission(const VarDecl &variable)
2706  : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
2707  IsByRef(false), IsConstantAggregate(false),
2708  SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
2709 
2710  bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
2711 
2712  public:
2713  static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
2714 
2715  bool useLifetimeMarkers() const {
2716  return SizeForLifetimeMarkers != nullptr;
2717  }
2718  llvm::Value *getSizeForLifetimeMarkers() const {
2719  assert(useLifetimeMarkers());
2720  return SizeForLifetimeMarkers;
2721  }
2722 
2723  /// Returns the raw, allocated address, which is not necessarily
2724  /// the address of the object itself. It is casted to default
2725  /// address space for address space agnostic languages.
2726  Address getAllocatedAddress() const {
2727  return Addr;
2728  }
2729 
2730  /// Returns the address for the original alloca instruction.
2731  Address getOriginalAllocatedAddress() const { return AllocaAddr; }
2732 
2733  /// Returns the address of the object within this declaration.
2734  /// Note that this does not chase the forwarding pointer for
2735  /// __block decls.
2736  Address getObjectAddress(CodeGenFunction &CGF) const {
2737  if (!IsByRef) return Addr;
2738 
2739  return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
2740  }
2741  };
2742  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
2743  void EmitAutoVarInit(const AutoVarEmission &emission);
2744  void EmitAutoVarCleanups(const AutoVarEmission &emission);
2745  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
2746  QualType::DestructionKind dtorKind);
2747 
2748  /// Emits the alloca and debug information for the size expressions for each
2749  /// dimension of an array. It registers the association of its (1-dimensional)
2750  /// QualTypes and size expression's debug node, so that CGDebugInfo can
2751  /// reference this node when creating the DISubrange object to describe the
2752  /// array types.
2753  void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI,
2754  const VarDecl &D,
2755  bool EmitDebugInfo);
2756 
2757  void EmitStaticVarDecl(const VarDecl &D,
2758  llvm::GlobalValue::LinkageTypes Linkage);
2759 
2760  class ParamValue {
2761  llvm::Value *Value;
2762  unsigned Alignment;
2763  ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
2764  public:
2765  static ParamValue forDirect(llvm::Value *value) {
2766  return ParamValue(value, 0);
2767  }
2768  static ParamValue forIndirect(Address addr) {
2769  assert(!addr.getAlignment().isZero());
2770  return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
2771  }
2772 
2773  bool isIndirect() const { return Alignment != 0; }
2774  llvm::Value *getAnyValue() const { return Value; }
2775 
2776  llvm::Value *getDirectValue() const {
2777  assert(!isIndirect());
2778  return Value;
2779  }
2780 
2781  Address getIndirectAddress() const {
2782  assert(isIndirect());
2783  return Address(Value, CharUnits::fromQuantity(Alignment));
2784  }
2785  };
2786 
2787  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
2788  void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
2789 
2790  /// protectFromPeepholes - Protect a value that we're intending to
2791  /// store to the side, but which will probably be used later, from
2792  /// aggressive peepholing optimizations that might delete it.
2793  ///
2794  /// Pass the result to unprotectFromPeepholes to declare that
2795  /// protection is no longer required.
2796  ///
2797  /// There's no particular reason why this shouldn't apply to
2798  /// l-values, it's just that no existing peepholes work on pointers.
2799  PeepholeProtection protectFromPeepholes(RValue rvalue);
2800  void unprotectFromPeepholes(PeepholeProtection protection);
2801 
2802  void EmitAlignmentAssumption(llvm::Value *PtrValue, llvm::Value *Alignment,
2803  llvm::Value *OffsetValue = nullptr) {
2804  Builder.CreateAlignmentAssumption(CGM.getDataLayout(), PtrValue, Alignment,
2805  OffsetValue);
2806  }
2807 
2808  //===--------------------------------------------------------------------===//
2809  // Statement Emission
2810  //===--------------------------------------------------------------------===//
2811 
2812  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
2813  void EmitStopPoint(const Stmt *S);
2814 
2815  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
2816  /// this function even if there is no current insertion point.
2817  ///
2818  /// This function may clear the current insertion point; callers should use
2819  /// EnsureInsertPoint if they wish to subsequently generate code without first
2820  /// calling EmitBlock, EmitBranch, or EmitStmt.
2821  void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = None);
2822 
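 // Illustrative sketch (hypothetical caller code; CGF and S are assumed to be
 // an in-scope CodeGenFunction & and a Stmt *):
 //
 //   CGF.EmitStmt(S);
 //   CGF.EnsureInsertPoint(); // EmitStmt may have cleared the insert point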
2823  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
2824  /// necessarily require an insertion point or debug information; typically
2825  /// because the statement amounts to a jump or a container of other
2826  /// statements.
2827  ///
2828  /// \return True if the statement was handled.
2829  bool EmitSimpleStmt(const Stmt *S);
2830 
2831  Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
2832  AggValueSlot AVS = AggValueSlot::ignored());
2833  Address EmitCompoundStmtWithoutScope(const CompoundStmt &S,
2834  bool GetLast = false,
2835  AggValueSlot AVS =
2836  AggValueSlot::ignored());
2837 
2838  /// EmitLabel - Emit the block for the given label. It is legal to call this
2839  /// function even if there is no current insertion point.
2840  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
2841 
2842  void EmitLabelStmt(const LabelStmt &S);
2843  void EmitAttributedStmt(const AttributedStmt &S);
2844  void EmitGotoStmt(const GotoStmt &S);
2845  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
2846  void EmitIfStmt(const IfStmt &S);
2847 
2848  void EmitWhileStmt(const WhileStmt &S,
2849  ArrayRef<const Attr *> Attrs = None);
2850  void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
2851  void EmitForStmt(const ForStmt &S,
2852  ArrayRef<const Attr *> Attrs = None);
2853  void EmitReturnStmt(const ReturnStmt &S);
2854  void EmitDeclStmt(const DeclStmt &S);
2855  void EmitBreakStmt(const BreakStmt &S);
2856  void EmitContinueStmt(const ContinueStmt &S);
2857  void EmitSwitchStmt(const SwitchStmt &S);
2858  void EmitDefaultStmt(const DefaultStmt &S);
2859  void EmitCaseStmt(const CaseStmt &S);
2860  void EmitCaseStmtRange(const CaseStmt &S);
2861  void EmitAsmStmt(const AsmStmt &S);
2862 
2863  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
2864  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
2865  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
2866  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
2867  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
2868 
2869  void EmitCoroutineBody(const CoroutineBodyStmt &S);
2870  void EmitCoreturnStmt(const CoreturnStmt &S);
2871  RValue EmitCoawaitExpr(const CoawaitExpr &E,
2872  AggValueSlot aggSlot = AggValueSlot::ignored(),
2873  bool ignoreResult = false);
2874  LValue EmitCoawaitLValue(const CoawaitExpr *E);
2875  RValue EmitCoyieldExpr(const CoyieldExpr &E,
2876  AggValueSlot aggSlot = AggValueSlot::ignored(),
2877  bool ignoreResult = false);
2878  LValue EmitCoyieldLValue(const CoyieldExpr *E);
2879  RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
2880 
2881  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2882  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2883 
2884  void EmitCXXTryStmt(const CXXTryStmt &S);
2885  void EmitSEHTryStmt(const SEHTryStmt &S);
2886  void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
2887  void EnterSEHTryStmt(const SEHTryStmt &S);
2888  void ExitSEHTryStmt(const SEHTryStmt &S);
2889 
2890  void pushSEHCleanup(CleanupKind kind,
2891  llvm::Function *FinallyFunc);
2892  void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
2893  const Stmt *OutlinedStmt);
2894 
2895  llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
2896  const SEHExceptStmt &Except);
2897 
2898  llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
2899  const SEHFinallyStmt &Finally);
2900 
2901  void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
2902  llvm::Value *ParentFP,
2903  llvm::Value *EntryEBP);
2904  llvm::Value *EmitSEHExceptionCode();
2905  llvm::Value *EmitSEHExceptionInfo();
2906  llvm::Value *EmitSEHAbnormalTermination();
2907 
2908  /// Emit simple code for OpenMP directives in Simd-only mode.
2909  void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
2910 
2911  /// Scan the outlined statement for captures from the parent function. For
2912  /// each capture, mark the capture as escaped and emit a call to
2913  /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
2914  void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
2915  bool IsFilter);
2916 
2917  /// Recovers the address of a local in a parent function. ParentVar is the
2918  /// address of the variable used in the immediate parent function. It can
2919  /// either be an alloca or a call to llvm.localrecover if there are nested
2920  /// outlined functions. ParentFP is the frame pointer of the outermost parent
2921  /// frame.
2922  Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
2923  Address ParentVar,
2924  llvm::Value *ParentFP);
2925 
2926  void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
2927  ArrayRef<const Attr *> Attrs = None);
2928 
2929  /// Controls insertion of cancellation exit blocks in worksharing constructs.
2930  class OMPCancelStackRAII {
2931  CodeGenFunction &CGF;
2932 
2933  public:
2934  OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
2935  bool HasCancel)
2936  : CGF(CGF) {
2937  CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
2938  }
2939  ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
2940  };
2941 
2942  /// Returns calculated size of the specified type.
2943  llvm::Value *getTypeSize(QualType Ty);
2944  LValue InitCapturedStruct(const CapturedStmt &S);
2945  llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
2946  llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
2947  Address GenerateCapturedStmtArgument(const CapturedStmt &S);
2948  llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S);
2949  void GenerateOpenMPCapturedVars(const CapturedStmt &S,
2950  SmallVectorImpl<llvm::Value *> &CapturedVars);
2951  void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
2952  SourceLocation Loc);
2953  /// Perform element by element copying of arrays with type \a
2954  /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
2955  /// generated by \a CopyGen.
2956  ///
2957  /// \param DestAddr Address of the destination array.
2958  /// \param SrcAddr Address of the source array.
2959  /// \param OriginalType Type of destination and source arrays.
2960  /// \param CopyGen Copying procedure that copies the value of a single array
2961  /// element to another single array element.
2962  void EmitOMPAggregateAssign(
2963  Address DestAddr, Address SrcAddr, QualType OriginalType,
2964  const llvm::function_ref<void(Address, Address)> CopyGen);
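  // A minimal usage sketch (hypothetical caller; DestAddr, SrcAddr, ArrayTy and
  // ElemTy are assumed to be in scope). The CopyGen callback is invoked with a
  // destination/source address pair for each element:
  //
  //   EmitOMPAggregateAssign(DestAddr, SrcAddr, ArrayTy,
  //       [this, ElemTy](Address DestElem, Address SrcElem) {
  //         EmitAggregateAssign(MakeAddrLValue(DestElem, ElemTy),
  //                             MakeAddrLValue(SrcElem, ElemTy), ElemTy);
  //       });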
2965  /// Emit proper copying of data from one variable to another.
2966  ///
2967  /// \param OriginalType Original type of the copied variables.
2968  /// \param DestAddr Destination address.
2969  /// \param SrcAddr Source address.
2970  /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
2971  /// type of the base array element).
2972  /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
2973  /// the base array element).
2974  /// \param Copy Actual copy expression used for copying data from \a SrcVD to \a
2975  /// DestVD.
2976  void EmitOMPCopy(QualType OriginalType,
2977  Address DestAddr, Address SrcAddr,
2978  const VarDecl *DestVD, const VarDecl *SrcVD,
2979  const Expr *Copy);
2980  /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
2981  /// \a X = \a E \a BO \a X.
2982  ///
2983  /// \param X Value to be updated.
2984  /// \param E Update value.
2985  /// \param BO Binary operation for update operation.
2986  /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
2987  /// expression, false otherwise.
2988  /// \param AO Atomic ordering of the generated atomic instructions.
2989  /// \param CommonGen Code generator for complex expressions that cannot be
2990  /// expressed through atomicrmw instruction.
2991  /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
2992  /// generated, <false, RValue::get(nullptr)> otherwise.
2993  std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
2994  LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
2995  llvm::AtomicOrdering AO, SourceLocation Loc,
2996  const llvm::function_ref<RValue(RValue)> CommonGen);
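  // A minimal usage sketch (hypothetical caller; XLValue, ERValue and Loc are
  // assumed to be in scope). The atomicrmw path is tried first; CommonGen is
  // only used when the update cannot be expressed that way:
  //
  //   std::pair<bool, RValue> Res = EmitOMPAtomicSimpleUpdateExpr(
  //       XLValue, ERValue, BO_Add, /*IsXLHSInRHSPart=*/true,
  //       llvm::AtomicOrdering::Monotonic, Loc,
  //       [](RValue XOld) { /* build the updated value from XOld */ return XOld; });
  //   // Res.first == true  -> an atomicrmw was emitted; Res.second is X's old value.
  //   // Res.first == false -> the CommonGen-based fallback was emitted instead.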
2997  bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
2998  OMPPrivateScope &PrivateScope);
2999  void EmitOMPPrivateClause(const OMPExecutableDirective &D,
3000  OMPPrivateScope &PrivateScope);
3001  void EmitOMPUseDevicePtrClause(
3002  const OMPClause &C, OMPPrivateScope &PrivateScope,
3003  const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
3004  /// Emit code for the 'copyin' clause in directive \a D. The following code is
3005  /// generated at the start of the outlined functions for the directives:
3006  /// \code
3007  /// threadprivate_var1 = master_threadprivate_var1;
3008  /// operator=(threadprivate_var2, master_threadprivate_var2);
3009  /// ...
3010  /// __kmpc_barrier(&loc, global_tid);
3011  /// \endcode
3012  ///
3013  /// \param D OpenMP directive possibly with 'copyin' clause(s).
3014  /// \returns true if at least one copyin variable is found, false otherwise.
3015  bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
3016  /// Emit initial code for lastprivate variables. If some variable is
3017  /// not also firstprivate, then the default initialization is used. Otherwise
3018  /// initialization of this variable is performed by the
3019  /// EmitOMPFirstprivateClause method.
3020  ///
3021  /// \param D Directive that may have 'lastprivate' clauses.
3022  /// \param PrivateScope Private scope for capturing lastprivate variables for
3023  /// proper codegen in internal captured statement.
3024  ///
3025  /// \returns true if there is at least one lastprivate variable, false
3026  /// otherwise.
3027  bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
3028  OMPPrivateScope &PrivateScope);
3029  /// Emit final copying of lastprivate values to original variables at
3030  /// the end of the worksharing or simd directive.
3031  ///
3032  /// \param D Directive that has at least one 'lastprivate' clause.
3033  /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3034  /// it is the last iteration of the loop code in associated directive, or to
3035  /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3036  void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
3037  bool NoFinals,
3038  llvm::Value *IsLastIterCond = nullptr);
3039  /// Emit initial code for linear clauses.
3040  void EmitOMPLinearClause(const OMPLoopDirective &D,
3041  CodeGenFunction::OMPPrivateScope &PrivateScope);
3042  /// Emit final code for linear clauses.
3043  /// \param CondGen Optional conditional code for final part of codegen for
3044  /// linear clause.
3045  void EmitOMPLinearClauseFinal(
3046  const OMPLoopDirective &D,
3047  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3048  /// Emit initial code for reduction variables. Creates reduction copies
3049  /// and initializes them with the values according to the OpenMP standard.
3050  ///
3051  /// \param D Directive (possibly) with the 'reduction' clause.
3052  /// \param PrivateScope Private scope for capturing reduction variables for
3053  /// proper codegen in internal captured statement.
3054  ///
3055  void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
3056  OMPPrivateScope &PrivateScope);
3057  /// Emit final update of reduction values to original variables at
3058  /// the end of the directive.
3059  ///
3060  /// \param D Directive that has at least one 'reduction' clause.
3061  /// \param ReductionKind The kind of reduction to perform.
3062  void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
3063  const OpenMPDirectiveKind ReductionKind);
3064  /// Emit initial code for linear variables. Creates private copies
3065  /// and initializes them with the values according to the OpenMP standard.
3066  ///
3067  /// \param D Directive (possibly) with the 'linear' clause.
3068  /// \return true if at least one linear variable is found that should be
3069  /// initialized with the value of the original variable, false otherwise.
3070  bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
3071 
3072  typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3073  llvm::Value * /*OutlinedFn*/,
3074  const OMPTaskDataTy & /*Data*/)>
3075  TaskGenTy;
3076  void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
3077  const OpenMPDirectiveKind CapturedRegion,
3078  const RegionCodeGenTy &BodyGen,
3079  const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3080  struct OMPTargetDataInfo {
3081  Address BasePointersArray = Address::invalid();
3082  Address PointersArray = Address::invalid();
3083  Address SizesArray = Address::invalid();
3084  unsigned NumberOfTargetItems = 0;
3085  explicit OMPTargetDataInfo() = default;
3086  OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
3087  Address SizesArray, unsigned NumberOfTargetItems)
3088  : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
3089  SizesArray(SizesArray), NumberOfTargetItems(NumberOfTargetItems) {}
3090  };
3091  void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
3092  const RegionCodeGenTy &BodyGen,
3093  OMPTargetDataInfo &InputInfo);
3094 
3095  void EmitOMPParallelDirective(const OMPParallelDirective &S);
3096  void EmitOMPSimdDirective(const OMPSimdDirective &S);
3097  void EmitOMPForDirective(const OMPForDirective &S);
3098  void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
3099  void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
3100  void EmitOMPSectionDirective(const OMPSectionDirective &S);
3101  void EmitOMPSingleDirective(const OMPSingleDirective &S);
3102  void EmitOMPMasterDirective(const OMPMasterDirective &S);
3103  void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
3104  void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
3105  void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
3106  void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
3107  void EmitOMPTaskDirective(const OMPTaskDirective &S);
3108  void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
3109  void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
3110  void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
3111  void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
3112  void EmitOMPFlushDirective(const OMPFlushDirective &S);
3113  void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
3114  void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
3115  void EmitOMPTargetDirective(const OMPTargetDirective &S);
3116  void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
3117  void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S);
3118  void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S);
3119  void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S);
3120  void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S);
3121  void
3122  EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S);
3123  void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
3124  void
3125  EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
3126  void EmitOMPCancelDirective(const OMPCancelDirective &S);
3127  void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S);
3128  void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
3129  void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
3130  void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
3131  void EmitOMPDistributeParallelForDirective(
3132  const OMPDistributeParallelForDirective &S);
3133  void EmitOMPDistributeParallelForSimdDirective(
3134  const OMPDistributeParallelForSimdDirective &S);
3135  void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
3136  void EmitOMPTargetParallelForSimdDirective(
3137  const OMPTargetParallelForSimdDirective &S);
3138  void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
3139  void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
3140  void
3141  EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
3142  void EmitOMPTeamsDistributeParallelForSimdDirective(
3143  const OMPTeamsDistributeParallelForSimdDirective &S);
3144  void EmitOMPTeamsDistributeParallelForDirective(
3145  const OMPTeamsDistributeParallelForDirective &S);
3146  void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
3147  void EmitOMPTargetTeamsDistributeDirective(
3148  const OMPTargetTeamsDistributeDirective &S);
3149  void EmitOMPTargetTeamsDistributeParallelForDirective(
3150  const OMPTargetTeamsDistributeParallelForDirective &S);
3151  void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
3152  const OMPTargetTeamsDistributeParallelForSimdDirective &S);
3153  void EmitOMPTargetTeamsDistributeSimdDirective(
3154  const OMPTargetTeamsDistributeSimdDirective &S);
3155 
3156  /// Emit device code for the target directive.
3157  static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
3158  StringRef ParentName,
3159  const OMPTargetDirective &S);
3160  static void
3161  EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3162  const OMPTargetParallelDirective &S);
3163  /// Emit device code for the target parallel for directive.
3164  static void EmitOMPTargetParallelForDeviceFunction(
3165  CodeGenModule &CGM, StringRef ParentName,
3166  const OMPTargetParallelForDirective &S);
3167  /// Emit device code for the target parallel for simd directive.
3168  static void EmitOMPTargetParallelForSimdDeviceFunction(
3169  CodeGenModule &CGM, StringRef ParentName,
3170  const OMPTargetParallelForSimdDirective &S);
3171  /// Emit device code for the target teams directive.
3172  static void
3173  EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3174  const OMPTargetTeamsDirective &S);
3175  /// Emit device code for the target teams distribute directive.
3176  static void EmitOMPTargetTeamsDistributeDeviceFunction(
3177  CodeGenModule &CGM, StringRef ParentName,
3178  const OMPTargetTeamsDistributeDirective &S);
3179  /// Emit device code for the target teams distribute simd directive.
3180  static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
3181  CodeGenModule &CGM, StringRef ParentName,
3182  const OMPTargetTeamsDistributeSimdDirective &S);
3183  /// Emit device code for the target simd directive.
3184  static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
3185  StringRef ParentName,
3186  const OMPTargetSimdDirective &S);
3187  /// Emit device code for the target teams distribute parallel for simd
3188  /// directive.
3189  static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
3190  CodeGenModule &CGM, StringRef ParentName,
3191  const OMPTargetTeamsDistributeParallelForSimdDirective &S);
3192 
3193  static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
3194  CodeGenModule &CGM, StringRef ParentName,
3195  const OMPTargetTeamsDistributeParallelForDirective &S);
3196  /// Emit inner loop of the worksharing/simd construct.
3197  ///
3198  /// \param S Directive, for which the inner loop must be emitted.
3199  /// \param RequiresCleanup true, if directive has some associated private
3200  /// variables.
3201  /// \param LoopCond Boolean condition for loop continuation.
3202  /// \param IncExpr Increment expression for loop control variable.
3203  /// \param BodyGen Generator for the inner body of the inner loop.
3204  /// \param PostIncGen Generator for post-increment code (required for ordered
3205  /// loop directives).
3206  void EmitOMPInnerLoop(
3207  const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
3208  const Expr *IncExpr,
3209  const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
3210  const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
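  // A minimal usage sketch (hypothetical; S is assumed to be the OMPLoopDirective
  // being lowered and LoopScope an already-privatized OMPPrivateScope):
  //
  //   EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
  //       [&S](CodeGenFunction &CGF) { CGF.EmitOMPLoopBody(S, JumpDest()); },
  //       [](CodeGenFunction &) {});   // no extra post-increment code needed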
3211 
3212  JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
3213  /// Emit initial code for loop counters of loop-based directives.
3214  void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
3215  OMPPrivateScope &LoopScope);
3216 
3217  /// Helper for the OpenMP loop directives.
3218  void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
3219 
3220  /// Emit code for the worksharing loop-based directive.
3221  /// \return true if this construct has any lastprivate clause, false
3222  /// otherwise.
3223  bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
3224  const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3225  const CodeGenDispatchBoundsTy &CGDispatchBounds);
3226 
3227  /// Emit code for the distribute loop-based directive.
3228  void EmitOMPDistributeLoop(const OMPLoopDirective &S,
3229  const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
3230 
3231  /// Helpers for the OpenMP loop directives.
3232  void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
3233  void EmitOMPSimdFinal(
3234  const OMPLoopDirective &D,
3235  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3236 
3237  /// Emits the lvalue for an expression with a possibly captured variable.
3238  LValue EmitOMPSharedLValue(const Expr *E);
3239 
3240 private:
3241  /// Helpers for blocks.
3242  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
3243 
3244  /// struct with the values to be passed to the OpenMP loop-related functions
3245  struct OMPLoopArguments {
3246  /// loop lower bound
3247  Address LB = Address::invalid();
3248  /// loop upper bound
3249  Address UB = Address::invalid();
3250  /// loop stride
3251  Address ST = Address::invalid();
3252  /// isLastIteration argument for runtime functions
3253  Address IL = Address::invalid();
3254  /// Chunk value generated by sema
3255  llvm::Value *Chunk = nullptr;
3256  /// EnsureUpperBound
3257  Expr *EUB = nullptr;
3258  /// IncrementExpression
3259  Expr *IncExpr = nullptr;
3260  /// Loop initialization
3261  Expr *Init = nullptr;
3262  /// Loop exit condition
3263  Expr *Cond = nullptr;
3264  /// Update of LB after a whole chunk has been executed
3265  Expr *NextLB = nullptr;
3266  /// Update of UB after a whole chunk has been executed
3267  Expr *NextUB = nullptr;
3268  OMPLoopArguments() = default;
3269  OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
3270  llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
3271  Expr *IncExpr = nullptr, Expr *Init = nullptr,
3272  Expr *Cond = nullptr, Expr *NextLB = nullptr,
3273  Expr *NextUB = nullptr)
3274  : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
3275  IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
3276  NextUB(NextUB) {}
3277  };
3278  void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
3279  const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
3280  const OMPLoopArguments &LoopArgs,
3281  const CodeGenLoopTy &CodeGenLoop,
3282  const CodeGenOrderedTy &CodeGenOrdered);
3283  void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
3284  bool IsMonotonic, const OMPLoopDirective &S,
3285  OMPPrivateScope &LoopScope, bool Ordered,
3286  const OMPLoopArguments &LoopArgs,
3287  const CodeGenDispatchBoundsTy &CGDispatchBounds);
3288  void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
3289  const OMPLoopDirective &S,
3290  OMPPrivateScope &LoopScope,
3291  const OMPLoopArguments &LoopArgs,
3292  const CodeGenLoopTy &CodeGenLoopContent);
3293  /// Emit code for sections directive.
3294  void EmitSections(const OMPExecutableDirective &S);
3295 
3296 public:
3297 
3298  //===--------------------------------------------------------------------===//
3299  // LValue Expression Emission
3300  //===--------------------------------------------------------------------===//
3301 
3302  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
3303  RValue GetUndefRValue(QualType Ty);
3304 
3305  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
3306  /// and issue an ErrorUnsupported style diagnostic (using the
3307  /// provided Name).
3308  RValue EmitUnsupportedRValue(const Expr *E,
3309  const char *Name);
3310 
3311  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
3312  /// an ErrorUnsupported style diagnostic (using the provided Name).
3313  LValue EmitUnsupportedLValue(const Expr *E,
3314  const char *Name);
3315 
3316  /// EmitLValue - Emit code to compute a designator that specifies the location
3317  /// of the expression.
3318  ///
3319  /// This can return one of two things: a simple address or a bitfield
3320  /// reference. In either case, the LLVM Value* in the LValue structure is
3321  /// guaranteed to be an LLVM pointer type.
3322  ///
3323  /// If this returns a bitfield reference, nothing about the pointee type of
3324  /// the LLVM value is known: For example, it may not be a pointer to an
3325  /// integer.
3326  ///
3327  /// If this returns a normal address, and if the lvalue's C type is fixed
3328  /// size, this method guarantees that the returned pointer type will point to
3329  /// an LLVM type of the same size as the lvalue's type. If the lvalue has a
3330  /// variable length type, this is not possible.
3331  ///
3332  LValue EmitLValue(const Expr *E);
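  // A brief usage sketch (hypothetical caller): inspect the kind of the returned
  // LValue before relying on the underlying pointer.
  //
  //   LValue LV = EmitLValue(E);
  //   if (LV.isSimple()) {
  //     Address Addr = LV.getAddress();      // points at a sized LLVM type
  //     // ... load/store directly ...
  //   } else if (LV.isBitField()) {
  //     // Pointee type is unspecified; go through the bit-field accessors,
  //     // e.g. EmitLoadOfBitfieldLValue(LV, Loc).
  //   }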
3333 
3334  /// Same as EmitLValue but additionally we generate checking code to
3335  /// guard against undefined behavior. This is only suitable when we know
3336  /// that the address will be used to access the object.
3337  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
3338 
3339  RValue convertTempToRValue(Address addr, QualType type,
3340  SourceLocation Loc);
3341 
3342  void EmitAtomicInit(Expr *E, LValue lvalue);
3343 
3344  bool LValueIsSuitableForInlineAtomic(LValue Src);
3345 
3346  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
3347  AggValueSlot Slot = AggValueSlot::ignored());
3348 
3349  RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
3350  llvm::AtomicOrdering AO, bool IsVolatile = false,
3351  AggValueSlot slot = AggValueSlot::ignored());
3352 
3353  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
3354 
3355  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
3356  bool IsVolatile, bool isInit);
3357 
3358  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
3359  LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
3360  llvm::AtomicOrdering Success =
3361  llvm::AtomicOrdering::SequentiallyConsistent,
3362  llvm::AtomicOrdering Failure =
3363  llvm::AtomicOrdering::SequentiallyConsistent,
3364  bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
3365 
3366  void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
3367  const llvm::function_ref<RValue(RValue)> &UpdateOp,
3368  bool IsVolatile);
3369 
3370  /// EmitToMemory - Change a scalar value from its value
3371  /// representation to its in-memory representation.
3372  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
3373 
3374  /// EmitFromMemory - Change a scalar value from its memory
3375  /// representation to its value representation.
3376  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
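  // For example, on typical targets a C/C++ 'bool' is stored in memory as an i8
  // but used as an i1 value, so a load is followed by EmitFromMemory (and a
  // store is preceded by EmitToMemory). A short sketch (hypothetical caller):
  //
  //   llvm::Value *V = Builder.CreateLoad(Addr);        // loads the i8
  //   V = EmitFromMemory(V, getContext().BoolTy);       // truncate to i1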
3377 
3378  /// Check if the scalar \p Value is within the valid range for the given
3379  /// type \p Ty.
3380  ///
3381  /// Returns true if a check is needed (even if the range is unknown).
3382  bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
3383  SourceLocation Loc);
3384 
3385  /// EmitLoadOfScalar - Load a scalar value from an address, taking
3386  /// care to appropriately convert from the memory representation to
3387  /// the LLVM value representation.
3388  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
3389  SourceLocation Loc,
3390  AlignmentSource Source = AlignmentSource::Type,
3391  bool isNontemporal = false) {
3392  return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
3393  CGM.getTBAAAccessInfo(Ty), isNontemporal);
3394  }
3395 
3396  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
3397  SourceLocation Loc, LValueBaseInfo BaseInfo,
3398  TBAAAccessInfo TBAAInfo,
3399  bool isNontemporal = false);
3400 
3401  /// EmitLoadOfScalar - Load a scalar value from an address, taking
3402  /// care to appropriately convert from the memory representation to
3403  /// the LLVM value representation. The l-value must be a simple
3404  /// l-value.
3405  llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);
3406 
3407  /// EmitStoreOfScalar - Store a scalar value to an address, taking
3408  /// care to appropriately convert from the memory representation to
3409  /// the LLVM value representation.
3410  void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
3411  bool Volatile, QualType Ty,
3412  AlignmentSource Source = AlignmentSource::Type,
3413  bool isInit = false, bool isNontemporal = false) {
3414  EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
3415  CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
3416  }
3417 
3418  void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
3419  bool Volatile, QualType Ty,
3420  LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
3421  bool isInit = false, bool isNontemporal = false);
3422 
3423  /// EmitStoreOfScalar - Store a scalar value to an address, taking
3424  /// care to appropriately convert from the memory representation to
3425  /// the LLVM value representation. The l-value must be a simple
3426  /// l-value. The isInit flag indicates whether this is an initialization.
3427  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
3428  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
3429 
3430  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
3431  /// this method emits the address of the lvalue, then loads the result as an
3432  /// rvalue, returning the rvalue.
3433  RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
3434  RValue EmitLoadOfExtVectorElementLValue(LValue V);
3435  RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
3436  RValue EmitLoadOfGlobalRegLValue(LValue LV);
3437 
3438  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
3439  /// lvalue, where both are guaranteed to have the same type, and that type
3440  /// is 'Ty'.
3441  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
3442  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
3443  void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
3444 
3445  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
3446  /// as EmitStoreThroughLValue.
3447  ///
3448  /// \param Result [out] - If non-null, this will be set to a Value* for the
3449  /// bit-field contents after the store, appropriate for use as the result of
3450  /// an assignment to the bit-field.
3451  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
3452  llvm::Value **Result=nullptr);
3453 
3454  /// Emit an l-value for an assignment (simple or compound) of complex type.
3455  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
3456  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
3457  LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
3458  llvm::Value *&Result);
3459 
3460  // Note: only available for agg return types
3461  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
3462  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
3463  // Note: only available for agg return types
3464  LValue EmitCallExprLValue(const CallExpr *E);
3465  // Note: only available for agg return types
3466  LValue EmitVAArgExprLValue(const VAArgExpr *E);
3467  LValue EmitDeclRefLValue(const DeclRefExpr *E);
3468  LValue EmitStringLiteralLValue(const StringLiteral *E);
3469  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
3470  LValue EmitPredefinedLValue(const PredefinedExpr *E);
3471  LValue EmitUnaryOpLValue(const UnaryOperator *E);
3472  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3473  bool Accessed = false);
3474  LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
3475  bool IsLowerBound = true);
3476  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
3477  LValue EmitMemberExpr(const MemberExpr *E);
3478  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
3479  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
3480  LValue EmitInitListLValue(const InitListExpr *E);
3481  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
3482  LValue EmitCastLValue(const CastExpr *E);
3483  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
3484  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
3485 
3486  Address EmitExtVectorElementLValue(LValue V);
3487 
3488  RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
3489 
3490  Address EmitArrayToPointerDecay(const Expr *Array,
3491  LValueBaseInfo *BaseInfo = nullptr,
3492  TBAAAccessInfo *TBAAInfo = nullptr);
3493 
3494  class ConstantEmission {
3495  llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
3496  ConstantEmission(llvm::Constant *C, bool isReference)
3497  : ValueAndIsReference(C, isReference) {}
3498  public:
3499  ConstantEmission() {}
3500  static ConstantEmission forReference(llvm::Constant *C) {
3501  return ConstantEmission(C, true);
3502  }
3503  static ConstantEmission forValue(llvm::Constant *C) {
3504  return ConstantEmission(C, false);
3505  }
3506 
3507  explicit operator bool() const {
3508  return ValueAndIsReference.getOpaqueValue() != nullptr;
3509  }
3510 
3511  bool isReference() const { return ValueAndIsReference.getInt(); }
3512  LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
3513  assert(isReference());
3514  return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
3515  refExpr->getType());
3516  }
3517 
3518  llvm::Constant *getValue() const {
3519  assert(!isReference());
3520  return ValueAndIsReference.getPointer();
3521  }
3522  };
3523 
3524  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
3525  ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
3526 
3527  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
3528  AggValueSlot slot = AggValueSlot::ignored());
3529  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
3530 
3531  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
3532  const ObjCIvarDecl *Ivar);
3533  LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
3534  LValue EmitLValueForLambdaField(const FieldDecl *Field);
3535 
3536  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
3537  /// if the Field is a reference, this will return the address of the reference
3538  /// and not the address of the value stored in the reference.
3539  LValue EmitLValueForFieldInitialization(LValue Base,
3540  const FieldDecl* Field);
3541 
3542  LValue EmitLValueForIvar(QualType ObjectTy,
3543  llvm::Value* Base, const ObjCIvarDecl *Ivar,
3544  unsigned CVRQualifiers);
3545 
3546  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
3547  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
3548  LValue EmitLambdaLValue(const LambdaExpr *E);
3549  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
3550  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
3551 
3552  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
3553  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
3554  LValue EmitStmtExprLValue(const StmtExpr *E);
3555  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
3556  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
3557  void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
3558 
3559  //===--------------------------------------------------------------------===//
3560  // Scalar Expression Emission
3561  //===--------------------------------------------------------------------===//
3562 
3563  /// EmitCall - Generate a call of the given function, expecting the given
3564  /// result type, and using the given argument list which specifies both the
3565  /// LLVM arguments and the types they were derived from.
3566  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
3567  ReturnValueSlot ReturnValue, const CallArgList &Args,
3568  llvm::Instruction **callOrInvoke, SourceLocation Loc);
3569  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
3570  ReturnValueSlot ReturnValue, const CallArgList &Args,
3571  llvm::Instruction **callOrInvoke = nullptr) {
3572  return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
3573  SourceLocation());
3574  }
3575  RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
3576  ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr);
3577  RValue EmitCallExpr(const CallExpr *E,
3578  ReturnValueSlot ReturnValue = ReturnValueSlot());
3579  RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
3580  CGCallee EmitCallee(const Expr *E);
3581 
3582  void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
3583 
3584  llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
3585  const Twine &name = "");
3586  llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
3587  ArrayRef<llvm::Value*> args,
3588  const Twine &name = "");
3589  llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
3590  const Twine &name = "");
3591  llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
3592  ArrayRef<llvm::Value*> args,
3593  const Twine &name = "");
3594 
3595  SmallVector<llvm::OperandBundleDef, 1>
3596  getBundlesForFunclet(llvm::Value *Callee);
3597 
3598  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
3599  ArrayRef<llvm::Value *> Args,
3600  const Twine &Name = "");
3601  llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
3602  ArrayRef<llvm::Value*> args,
3603  const Twine &name = "");
3604  llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
3605  const Twine &name = "");
3606  void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3607  ArrayRef<llvm::Value*> args);
3608 
3609  llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
3610  NestedNameSpecifier *Qual,
3611  llvm::Type *Ty);
3612 
3613  CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
3614  CXXDtorType Type,
3615  const CXXRecordDecl *RD);
3616 
3617  // Return the copy constructor name with the prefix "__copy_constructor_"
3618  // removed.
3619  static std::string getNonTrivialCopyConstructorStr(QualType QT,
3620  CharUnits Alignment,
3621  bool IsVolatile,
3622  ASTContext &Ctx);
3623 
3624  // Return the destructor name with the prefix "__destructor_" removed.
3625  static std::string getNonTrivialDestructorStr(QualType QT,
3626  CharUnits Alignment,
3627  bool IsVolatile,
3628  ASTContext &Ctx);
3629 
3630  // These functions emit calls to the special functions of non-trivial C
3631  // structs.
3632  void defaultInitNonTrivialCStructVar(LValue Dst);
3633  void callCStructDefaultConstructor(LValue Dst);
3634  void callCStructDestructor(LValue Dst);
3635  void callCStructCopyConstructor(LValue Dst, LValue Src);
3636  void callCStructMoveConstructor(LValue Dst, LValue Src);
3637  void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
3638  void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
3639 
3640  RValue
3641  EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method,
3642  const CGCallee &Callee,
3643  ReturnValueSlot ReturnValue, llvm::Value *This,
3644  llvm::Value *ImplicitParam,
3645  QualType ImplicitParamTy, const CallExpr *E,
3646  CallArgList *RtlArgs);
3647  RValue EmitCXXDestructorCall(const CXXDestructorDecl *DD,
3648  const CGCallee &Callee,
3649  llvm::Value *This, llvm::Value *ImplicitParam,
3650  QualType ImplicitParamTy, const CallExpr *E,
3651  StructorType Type);
3652  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
3653  ReturnValueSlot ReturnValue);
3654  RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE,
3655  const CXXMethodDecl *MD,
3656  ReturnValueSlot ReturnValue,
3657  bool HasQualifier,
3658  NestedNameSpecifier *Qualifier,
3659  bool IsArrow, const Expr *Base);
3660  // Compute the object pointer.
3661  Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
3662  llvm::Value *memberPtr,
3663  const MemberPointerType *memberPtrType,
3664  LValueBaseInfo *BaseInfo = nullptr,
3665  TBAAAccessInfo *TBAAInfo = nullptr);
3666  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
3667  ReturnValueSlot ReturnValue);
3668 
3669  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
3670  const CXXMethodDecl *MD,
3671  ReturnValueSlot ReturnValue);
3672  RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);
3673 
3674  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
3675  ReturnValueSlot ReturnValue);
3676 
3677  RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
3678  ReturnValueSlot ReturnValue);
3679 
3680  RValue EmitBuiltinExpr(const FunctionDecl *FD,
3681  unsigned BuiltinID, const CallExpr *E,
3682  ReturnValueSlot ReturnValue);
3683 
3684  /// Emit IR for __builtin_os_log_format.
3685  RValue emitBuiltinOSLogFormat(const CallExpr &E);
3686 
3687  llvm::Function *generateBuiltinOSLogHelperFunction(
3688  const analyze_os_log::OSLogBufferLayout &Layout,
3689  CharUnits BufferAlignment);
3690 
3691  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
3692 
3693  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
3694  /// is unhandled by the current target.
3695  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3696 
3697  llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
3698  const llvm::CmpInst::Predicate Fp,
3699  const llvm::CmpInst::Predicate Ip,
3700  const llvm::Twine &Name = "");
3701  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
3702  llvm::Triple::ArchType Arch);
3703 
3704  llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
3705  unsigned LLVMIntrinsic,
3706  unsigned AltLLVMIntrinsic,
3707  const char *NameHint,
3708  unsigned Modifier,
3709  const CallExpr *E,
3710  SmallVectorImpl<llvm::Value *> &Ops,
3711  Address PtrOp0, Address PtrOp1,
3712  llvm::Triple::ArchType Arch);
3713 
3714  llvm::Value *EmitISOVolatileLoad(const CallExpr *E);
3715  llvm::Value *EmitISOVolatileStore(const CallExpr *E);
3716 
3717  llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
3718  unsigned Modifier, llvm::Type *ArgTy,
3719  const CallExpr *E);
3720  llvm::Value *EmitNeonCall(llvm::Function *F,
3721  SmallVectorImpl<llvm::Value*> &O,
3722  const char *name,
3723  unsigned shift = 0, bool rightshift = false);
3724  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
3725  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
3726  bool negateForRightShift);
3727  llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
3728  llvm::Type *Ty, bool usgn, const char *name);
3729  llvm::Value *vectorWrapScalar16(llvm::Value *Op);
3730  llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
3731  llvm::Triple::ArchType Arch);
3732 
3733  llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
3734  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3735  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3736  llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3737  llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3738  llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3739  llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
3740  const CallExpr *E);
3741  llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3742 
3743 private:
3744  enum class MSVCIntrin;
3745 
3746 public:
3747  llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
3748 
3749  llvm::Value *EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args);
3750 
3751  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
3752  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
3753  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
3754  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
3755  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
3756  llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
3757  const ObjCMethodDecl *MethodWithObjects);
3758  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
3759  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
3760  ReturnValueSlot Return = ReturnValueSlot());
3761 
3762  /// Retrieves the default cleanup kind for an ARC cleanup.
3763  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
3764  CleanupKind getARCCleanupKind() {
3765  return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
3766  ? NormalAndEHCleanup : NormalCleanup;
3767  }
3768 
3769  // ARC primitives.
3770  void EmitARCInitWeak(Address addr, llvm::Value *value);
3771  void EmitARCDestroyWeak(Address addr);
3772  llvm::Value *EmitARCLoadWeak(Address addr);
3773  llvm::Value *EmitARCLoadWeakRetained(Address addr);
3774  llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
3775  void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
3776  void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
3777  void EmitARCCopyWeak(Address dst, Address src);
3778  void EmitARCMoveWeak(Address dst, Address src);
3779  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
3780  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
3781  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
3782  bool resultIgnored);
3783  llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
3784  bool resultIgnored);
3785  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
3786  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
3787  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
3788  void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
3789  void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
3790  llvm::Value *EmitARCAutorelease(llvm::Value *value);
3791  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
3792  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
3793  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
3794  llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
3795 
3796  std::pair<LValue,llvm::Value*>
3797  EmitARCStoreAutoreleasing(const BinaryOperator *e);
3798  std::pair<LValue,llvm::Value*>
3799  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
3800  std::pair<LValue,llvm::Value*>
3801  EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);
3802 
3803  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
3804  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
3805  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
3806 
3807  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
3808  llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
3809  bool allowUnsafeClaim);
3810  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
3811  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
3812  llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr);
3813 
3814  void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
3815 
3816  static Destroyer destroyARCStrongImprecise;
3817  static Destroyer destroyARCStrongPrecise;
3818  static Destroyer destroyARCWeak;
3819  static Destroyer emitARCIntrinsicUse;
3820  static Destroyer destroyNonTrivialCStruct;
3821 
3822  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
3823  llvm::Value *EmitObjCAutoreleasePoolPush();
3824  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
3825  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
3826  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
3827 
3828  /// Emits a reference binding to the passed in expression.
3829  RValue EmitReferenceBindingToExpr(const Expr *E);
3830 
3831  //===--------------------------------------------------------------------===//
3832  // Expression Emission
3833  //===--------------------------------------------------------------------===//
3834 
3835  // Expressions are broken into three classes: scalar, complex, aggregate.
3836 
3837  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
3838  /// scalar type, returning the result.
3839  llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
3840 
3841  /// Emit a conversion from the specified type to the specified destination
3842  /// type, both of which are LLVM scalar types.
3843  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
3844  QualType DstTy, SourceLocation Loc);
3845 
3846  /// Emit a conversion from the specified complex type to the specified
3847  /// destination type, where the destination type is an LLVM scalar type.
3848  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
3849  QualType DstTy,
3850  SourceLocation Loc);
3851 
3852  /// EmitAggExpr - Emit the computation of the specified expression
3853  /// of aggregate type. The result is computed into the given slot,
3854  /// which may be null to indicate that the value is not needed.
3855  void EmitAggExpr(const Expr *E, AggValueSlot AS);
3856 
3857  /// EmitAggExprToLValue - Emit the computation of the specified expression of
3858  /// aggregate type into a temporary LValue.
3859  LValue EmitAggExprToLValue(const Expr *E);
3860 
3861  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
3862  /// make sure it survives garbage collection until this point.
3863  void EmitExtendGCLifetime(llvm::Value *object);
3864 
3865  /// EmitComplexExpr - Emit the computation of the specified expression of
3866  /// complex type, returning the result.
3867  ComplexPairTy EmitComplexExpr(const Expr *E,
3868  bool IgnoreReal = false,
3869  bool IgnoreImag = false);
3870 
3871  /// EmitComplexExprIntoLValue - Emit the given expression of complex
3872  /// type and place its result into the specified l-value.
3873  void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
3874 
3875  /// EmitStoreOfComplex - Store a complex number into the specified l-value.
3876  void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
3877 
3878  /// EmitLoadOfComplex - Load a complex number from the specified l-value.
3879  ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
3880 
3881  Address emitAddrOfRealComponent(Address complex, QualType complexType);
3882  Address emitAddrOfImagComponent(Address complex, QualType complexType);
3883 
3884  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
3885  /// global variable that has already been created for it. If the initializer
3886  /// has a different type than GV does, this may free GV and return a different
3887  /// one. Otherwise it just returns GV.
3888  llvm::GlobalVariable *
3889  AddInitializerToStaticVarDecl(const VarDecl &D,
3890  llvm::GlobalVariable *GV);
3891 
3892 
3893  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
3894  /// variable with global storage.
3895  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
3896  bool PerformInit);
3897 
3898  llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::Constant *Dtor,
3899  llvm::Constant *Addr);
3900 
3901  /// Call atexit() with a function that passes the given argument to
3902  /// the given function.
3903  void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::Constant *fn,
3904  llvm::Constant *addr);
3905 
3906  /// Call atexit() with function dtorStub.
3907  void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
3908 
3909  /// Emit code in this function to perform a guarded variable
3910  /// initialization. Guarded initializations are used when it's not
3911  /// possible to prove that an initialization will be done exactly
3912  /// once, e.g. with a static local variable or a static data member
3913  /// of a class template.
3914  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
3915  bool PerformInit);
3916 
3917  enum class GuardKind { VariableGuard, TlsGuard };
3918 
3919  /// Emit a branch to select whether or not to perform guarded initialization.
3920  void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
3921  llvm::BasicBlock *InitBlock,
3922  llvm::BasicBlock *NoInitBlock,
3923  GuardKind Kind, const VarDecl *D);
3924 
3925  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
3926  /// variables.
3927  void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
3928  ArrayRef<llvm::Function *> CXXThreadLocals,
3929  Address Guard = Address::invalid());
3930 
3931  /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
3932  /// variables.
3933  void GenerateCXXGlobalDtorsFunc(
3934  llvm::Function *Fn,
3935  const std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>>
3936  &DtorsAndObjects);
3937 
3938  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
3939  const VarDecl *D,
3940  llvm::GlobalVariable *Addr,
3941  bool PerformInit);
3942 
3943  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
3944 
3945  void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
3946 
3947  void enterFullExpression(const ExprWithCleanups *E) {
3948  if (E->getNumObjects() == 0) return;
3949  enterNonTrivialFullExpression(E);
3950  }
3951  void enterNonTrivialFullExpression(const ExprWithCleanups *E);
3952 
3953  void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
3954 
3955  void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
3956 
3957  RValue EmitAtomicExpr(AtomicExpr *E);
3958 
3959  //===--------------------------------------------------------------------===//
3960  // Annotations Emission
3961  //===--------------------------------------------------------------------===//
3962 
3963  /// Emit an annotation call (intrinsic or builtin).
3964  llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
3965  llvm::Value *AnnotatedVal,
3966  StringRef AnnotationStr,
3967  SourceLocation Location);
3968 
3969  /// Emit local annotations for the local variable V, declared by D.
3970  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
3971 
3972  /// Emit field annotations for the given field & value. Returns the
3973  /// annotation result.
3974  Address EmitFieldAnnotations(const FieldDecl *D, Address V);
3975 
3976  //===--------------------------------------------------------------------===//
3977  // Internal Helpers
3978  //===--------------------------------------------------------------------===//
3979 
3980  /// ContainsLabel - Return true if the statement contains a label in it. If
3981  /// this statement is not executed normally, the absence of a label means
3982  /// that we can simply remove the code.
3983  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
3984 
3985  /// containsBreak - Return true if the statement contains a break out of it.
3986  /// If the statement (recursively) contains a switch or loop with a break
3987  /// inside of it, this is fine.
3988  static bool containsBreak(const Stmt *S);
3989 
3990  /// Determine if the given statement might introduce a declaration into the
3991  /// current scope, by being a (possibly-labelled) DeclStmt.
3992  static bool mightAddDeclToScope(const Stmt *S);
3993 
3994  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
3995  /// to a constant, or if it does but contains a label, return false. If it
3996  /// constant folds return true and set the boolean result in Result.
3997  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result,
3998  bool AllowLabels = false);
3999 
4000  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
4001  /// to a constant, or if it does but contains a label, return false. If it
4002  /// constant folds return true and set the folded value.
4003  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
4004  bool AllowLabels = false);
4005 
4006  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
4007  /// if statement) to the specified blocks. Based on the condition, this might
4008  /// try to simplify the codegen of the conditional based on the branch.
4009  /// TrueCount should be the number of times we expect the condition to
4010  /// evaluate to true based on PGO data.
4011  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
4012  llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
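  // A minimal sketch of the usual 'if'-statement pattern (hypothetical caller;
  // S is assumed to be the IfStmt and the block names are illustrative):
  //
  //   bool CondConstant;
  //   if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
  //     // emit only the branch that is actually taken
  //   } else {
  //     llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  //     llvm::BasicBlock *ElseBlock = createBasicBlock("if.else");
  //     EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
  //                          getProfileCount(S.getThen()));
  //   }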
4013 
4014  /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
4015  /// nonnull, if \p LHS is marked _Nonnull.
4016  void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
4017 
4018  /// An enumeration which makes it easier to specify whether or not an
4019  /// operation is a subtraction.
4020  enum { NotSubtraction = false, IsSubtraction = true };
4021 
4022  /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
4023  /// detect undefined behavior when the pointer overflow sanitizer is enabled.
4024  /// \p SignedIndices indicates whether any of the GEP indices are signed.
4025  /// \p IsSubtraction indicates whether the expression used to form the GEP
4026  /// is a subtraction.
4027  llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
4028  ArrayRef<llvm::Value *> IdxList,
4029  bool SignedIndices,
4030  bool IsSubtraction,
4031  SourceLocation Loc,
4032  const Twine &Name = "");
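  // A minimal usage sketch for lowering 'p + i' (hypothetical caller; PtrValue,
  // IdxValue and Loc are assumed to be in scope). The overflow check is only
  // emitted when the pointer-overflow sanitizer is enabled:
  //
  //   llvm::Value *Result = EmitCheckedInBoundsGEP(
  //       PtrValue, /*IdxList=*/{IdxValue},
  //       /*SignedIndices=*/true, /*IsSubtraction=*/false, Loc, "add.ptr");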
4033 
4034  /// Specifies which type of sanitizer check to apply when handling a
4035  /// particular builtin.
4036  enum BuiltinCheckKind {
4037  BCK_CTZPassedZero,
4038  BCK_CLZPassedZero,
4039  };
4040 
4041  /// Emits an argument for a call to a builtin. If the builtin sanitizer is
4042  /// enabled, a runtime check specified by \p Kind is also emitted.
4043  llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
4044 
4045  /// Emit a description of a type in a format suitable for passing to
4046  /// a runtime sanitizer handler.
4047  llvm::Constant *EmitCheckTypeDescriptor(QualType T);
4048 
4049  /// Convert a value into a format suitable for passing to a runtime
4050  /// sanitizer handler.
4051  llvm::Value *EmitCheckValue(llvm::Value *V);
4052 
4053  /// Emit a description of a source location in a format suitable for
4054  /// passing to a runtime sanitizer handler.
4055  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
4056 
4057  /// Create a basic block that will call a handler function in a
4058  /// sanitizer runtime with the provided arguments, and create a conditional
4059  /// branch to it.
4060  void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
4061  SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
4062  ArrayRef<llvm::Value *> DynamicArgs);
4063 
4064  /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
4065  /// if Cond is false.
4066  void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond,
4067  llvm::ConstantInt *TypeId, llvm::Value *Ptr,
4068  ArrayRef<llvm::Constant *> StaticArgs);
4069 
4070  /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
4071  /// checking is enabled. Otherwise, just emit an unreachable instruction.
4072  void EmitUnreachable(SourceLocation Loc);
4073 
4074  /// Create a basic block that will call the trap intrinsic, and emit a
4075  /// conditional branch to it, for the -ftrapv checks.
4076  void EmitTrapCheck(llvm::Value *Checked);
4077 
4078  /// Emit a call to trap or debugtrap and attach function attribute
4079  /// "trap-func-name" if specified.
4080  llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
4081 
4082  /// Emit a stub for the cross-DSO CFI check function.
4083  void EmitCfiCheckStub();
4084 
4085  /// Emit a cross-DSO CFI failure handling function.
4086  void EmitCfiCheckFail();
4087 
4088  /// Create a check for a function parameter that may potentially be
4089  /// declared as non-null.
4090  void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
4091  AbstractCallee AC, unsigned ParmNum);
4092 
4093  /// EmitCallArg - Emit a single call argument.
4094  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
4095 
4096  /// EmitDelegateCallArg - We are performing a delegate call; that
4097  /// is, the current function is delegating to another one. Produce
4098  /// a r-value suitable for passing the given parameter.
4099  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
4100  SourceLocation loc);
4101 
4102  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
4103  /// point operation, expressed as the maximum relative error in ulp.
4104  void SetFPAccuracy(llvm::Value *Val, float Accuracy);
4105 
4106 private:
4107  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
4108  void EmitReturnOfRValue(RValue RV, QualType Ty);
4109 
4110  void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
4111 
4113  DeferredReplacements;
4114 
4115  /// Set the address of a local variable.
4116  void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
4117  assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
4118  LocalDeclMap.insert({VD, Addr});
4119  }
4120 
4121  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
4122  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
4123  ///
4124  /// \param AI - The first function argument of the expansion.
4125  void ExpandTypeFromArgs(QualType Ty, LValue Dst,
4126  SmallVectorImpl<llvm::Value *>::iterator &AI);
4127 
4128  /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
4129  /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
4130  /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
4131  void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
4132  SmallVectorImpl<llvm::Value *> &IRCallArgs,
4133  unsigned &IRCallArgPos);
4134 
4135  llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
4136  const Expr *InputExpr, std::string &ConstraintStr);
4137 
4138  llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
4139  LValue InputValue, QualType InputType,
4140  std::string &ConstraintStr,
4141  SourceLocation Loc);
4142 
4143  /// Attempts to statically evaluate the object size of E. If that
4144  /// fails, emits code to figure the size of E out for us. This is
4145  /// pass_object_size aware.
4146  ///
4147  /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
4148  llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
4149  llvm::IntegerType *ResType,
4150  llvm::Value *EmittedE);
4151 
4152  /// Emits the size of E, as required by __builtin_object_size. This
4153  /// function is aware of pass_object_size parameters, and will act accordingly
4154  /// if E is a parameter with the pass_object_size attribute.
4155  llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
4156  llvm::IntegerType *ResType,
4157  llvm::Value *EmittedE);
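// Source-level sketch of the behaviour the two helpers above implement;
// 'fill' and 'caller' are made-up names:
//
//   void fill(char *buf __attribute__((pass_object_size(0)))) {
//     // Reports the size recorded at each call site, not a size deduced
//     // from the plain pointer parameter 'buf'.
//     unsigned long n = __builtin_object_size(buf, 0);
//   }
//   void caller(void) {
//     char big[16];
//     fill(big); // the implicit size argument evaluates to 16 here
//   }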
4158 
4159 public:
4160 #ifndef NDEBUG
4161  // Determine whether the given argument is an Objective-C method
4162  // that may have type parameters in its signature.
4163  static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4164  const DeclContext *dc = method->getDeclContext();
4165  if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
4166  return classDecl->getTypeParamListAsWritten();
4167  }
4168 
4169  if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4170  return catDecl->getTypeParamList();
4171  }
4172 
4173  return false;
4174  }
4175 
4176  template<typename T>
4177  static bool isObjCMethodWithTypeParams(const T *) { return false; }
4178 #endif
4179 
4180  enum class EvaluationOrder {
4181  ///! No language constraints on evaluation order.
4182  Default,
4183  ///! Language semantics require left-to-right evaluation.
4184  ForceLeftToRight,
4185  ///! Language semantics require right-to-left evaluation.
4186  ForceRightToLeft
4187  };
4188 
4189  /// EmitCallArgs - Emit call arguments for a function.
4190  template <typename T>
4191  void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
4192  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4193  AbstractCallee AC = AbstractCallee(),
4194  unsigned ParamsToSkip = 0,
4195  EvaluationOrder Order = EvaluationOrder::Default) {
4196  SmallVector<QualType, 16> ArgTypes;
4197  CallExpr::const_arg_iterator Arg = ArgRange.begin();
4198 
4199  assert((ParamsToSkip == 0 || CallArgTypeInfo) &&
4200  "Can't skip parameters if type info is not provided");
4201  if (CallArgTypeInfo) {
4202 #ifndef NDEBUG
4203  bool isGenericMethod = isObjCMethodWithTypeParams(CallArgTypeInfo);
4204 #endif
4205 
4206  // First, use the argument types that the type info knows about
4207  for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip,
4208  E = CallArgTypeInfo->param_type_end();
4209  I != E; ++I, ++Arg) {
4210  assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4211  assert((isGenericMethod ||
4212  ((*I)->isVariablyModifiedType() ||
4213  (*I).getNonReferenceType()->isObjCRetainableType() ||
4214  getContext()
4215  .getCanonicalType((*I).getNonReferenceType())
4216  .getTypePtr() ==
4217  getContext()
4218  .getCanonicalType((*Arg)->getType())
4219  .getTypePtr())) &&
4220  "type mismatch in call argument!");
4221  ArgTypes.push_back(*I);
4222  }
4223  }
4224 
4225  // Either we've emitted all the call args, or we have a call to a variadic
4226  // function.
4227  assert((Arg == ArgRange.end() || !CallArgTypeInfo ||
4228  CallArgTypeInfo->isVariadic()) &&
4229  "Extra arguments in non-variadic function!");
4230 
4231  // If we still have any arguments, emit them using the type of the argument.
4232  for (auto *A : llvm::make_range(Arg, ArgRange.end()))
4233  ArgTypes.push_back(CallArgTypeInfo ? getVarArgType(A) : A->getType());
4234 
4235  EmitCallArgs(Args, ArgTypes, ArgRange, AC, ParamsToSkip, Order);
4236  }
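// Sketch of a typical use of the template above; 'CE' is assumed to be a
// const CallExpr* whose callee is a declared function with a prototype:
//
//   const FunctionDecl *FD = CE->getDirectCallee();
//   const auto *FnType = FD->getType()->castAs<FunctionProtoType>();
//   CallArgList Args;
//   EmitCallArgs(Args, FnType, CE->arguments(), AbstractCallee(FD));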
4237 
4238  void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
4239  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4240  AbstractCallee AC = AbstractCallee(),
4241  unsigned ParamsToSkip = 0,
4242  EvaluationOrder Order = EvaluationOrder::Default);
4243 
4244  /// EmitPointerWithAlignment - Given an expression with a pointer type,
4245  /// emit the value and compute our best estimate of the alignment of the
4246  /// pointee.
4247  ///
4248  /// \param BaseInfo - If non-null, this will be initialized with
4249  /// information about the source of the alignment and the may-alias
4250  /// attribute. Note that this function will conservatively fall back on
4251  /// the type when it doesn't recognize the expression and may-alias will
4252  /// be set to false.
4253  ///
4254  /// One reasonable way to use this information is when there's a language
4255  /// guarantee that the pointer must be aligned to some stricter value, and
4256  /// we're simply trying to ensure that sufficiently obvious uses of under-
4257  /// aligned objects don't get miscompiled; for example, a placement new
4258  /// into the address of a local variable. In such a case, it's quite
4259  /// reasonable to just ignore the returned alignment when it isn't from an
4260  /// explicit source.
4261  Address EmitPointerWithAlignment(const Expr *Addr,
4262  LValueBaseInfo *BaseInfo = nullptr,
4263  TBAAAccessInfo *TBAAInfo = nullptr);
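// The "placement new into a local" case mentioned above, at the source level
// ('Widget' and 'f' are illustrative names):
//
//   #include <new>
//   struct Widget { int x; };
//   void f() {
//     char buf[sizeof(Widget)];          // only alignof(char) is guaranteed
//     Widget *w = new (static_cast<void *>(buf)) Widget();
//     w->x = 1;
//   }
//
// EmitPointerWithAlignment can only prove alignof(char) for 'buf', while the
// new-expression requires the storage to be suitably aligned; trusting the
// stricter language guarantee and ignoring the conservative estimate is the
// judgement call described in the comment above.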
4264 
4265  /// If \p E references a parameter with pass_object_size info or a constant
4266  /// array size modifier, emit the object size divided by the size of \p EltTy.
4267  /// Otherwise return null.
4268  llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);
4269 
4270  void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
4271 
4271 
4272  struct TargetMultiVersionResolverOption {
4273  llvm::Function *Function;
4274  TargetAttr::ParsedTargetAttr ParsedAttribute;
4275  unsigned Priority;
4276  TargetMultiVersionResolverOption(
4277  const TargetInfo &TargInfo, llvm::Function *F,
4278  const clang::TargetAttr::ParsedTargetAttr &PT)
4279  : Function(F), ParsedAttribute(PT), Priority(0u) {
4280  for (StringRef Feat : PT.Features)
4281  Priority = std::max(Priority,
4282  TargInfo.multiVersionSortPriority(Feat.substr(1)));
4283 
4284  if (!PT.Architecture.empty())
4285  Priority = std::max(Priority,
4286  TargInfo.multiVersionSortPriority(PT.Architecture));
4287  }
4288 
4289  bool operator>(const TargetMultiVersionResolverOption &Other) const {
4290  return Priority > Other.Priority;
4291  }
4292  };
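// The source-level feature the resolver below implements (sketch); the
// function name 'work' is illustrative:
//
//   __attribute__((target("default")))      int work() { return 0; }
//   __attribute__((target("avx2")))         int work() { return 2; }
//   __attribute__((target("arch=skylake"))) int work() { return 3; }
//
// CodeGen emits one resolver that picks the highest-priority variant whose
// required features the running CPU reports.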
4293  void EmitTargetMultiVersionResolver(
4294  llvm::Function *Resolver,
4295  ArrayRef<TargetMultiVersionResolverOption> Options);
4296 
4297  struct CPUDispatchMultiVersionResolverOption {
4298  llvm::Function *Function;
4299  // Note: EmitX86CPUSupports only has 32 bits available, so we store the mask
4300  // as 32 bits here. When 64-bit support is added to __builtin_cpu_supports,
4301  // this can be extended to 64 bits.
4302  uint32_t FeatureMask;
4303  CPUDispatchMultiVersionResolverOption(llvm::Function *F, uint64_t Mask)
4304  : Function(F), FeatureMask(static_cast<uint32_t>(Mask)) {}
4305  bool operator>(const CPUDispatchMultiVersionResolverOption &Other) const {
4306  return FeatureMask > Other.FeatureMask;
4307  }
4308  };
4309  void EmitCPUDispatchMultiVersionResolver(
4310  llvm::Function *Resolver,
4311  ArrayRef<CPUDispatchMultiVersionResolverOption> Options);
4312  static uint32_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
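// The 32-bit feature mask noted above mirrors what __builtin_cpu_supports
// exposes at the source level (sketch):
//
//   if (__builtin_cpu_supports("avx2") && __builtin_cpu_supports("bmi2")) {
//     // take the AVX2 code path
//   }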
4313 
4314 private:
4315  QualType getVarArgType(const Expr *Arg);
4316 
4317  void EmitDeclMetadata();
4318 
4319  BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
4320  const AutoVarEmission &emission);
4321 
4322  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
4323 
4324  llvm::Value *GetValueForARMHint(unsigned BuiltinID);
4325  llvm::Value *EmitX86CpuIs(const CallExpr *E);
4326  llvm::Value *EmitX86CpuIs(StringRef CPUStr);
4327  llvm::Value *EmitX86CpuSupports(const CallExpr *E);
4328  llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
4329  llvm::Value *EmitX86CpuSupports(uint32_t Mask);
4330  llvm::Value *EmitX86CpuInit();
4331  llvm::Value *
4332  FormResolverCondition(const TargetMultiVersionResolverOption &RO);
4333 };
4334 
4335 inline DominatingLLVMValue::saved_type
4336 DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
4337  if (!needsSaving(value)) return saved_type(value, false);
4338 
4339  // Otherwise, we need an alloca.
4340  auto align = CharUnits::fromQuantity(
4341  CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
4342  Address alloca =
4343  CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
4344  CGF.Builder.CreateStore(value, alloca);
4345 
4346  return saved_type(alloca.getPointer(), true);
4347 }
4348 
4349 inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
4350  saved_type value) {
4351  // If the value says it wasn't saved, trust that it's still dominating.
4352  if (!value.getInt()) return value.getPointer();
4353 
4354  // Otherwise, it should be an alloca instruction, as set up in save().
4355  auto alloca = cast<llvm::AllocaInst>(value.getPointer());
4356  return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
4357 }
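// A condensed view of how the save/restore pair above is used when a cleanup
// is pushed from conditionally-executed code ('CGF' is a CodeGenFunction and
// 'V' an llvm::Value* that may not dominate the point where the cleanup runs):
//
//   DominatingLLVMValue::saved_type Saved = DominatingLLVMValue::save(CGF, V);
//   // ... later, while emitting the cleanup ...
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);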
4358 
4359 } // end namespace CodeGen
4360 } // end namespace clang
4361 
4362 #endif