clang  7.0.0svn
CodeGenFunction.h
Go to the documentation of this file.
1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This is the internal per-function state used for llvm translation.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15 #define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
16 
17 #include "CGBuilder.h"
18 #include "CGDebugInfo.h"
19 #include "CGLoopInfo.h"
20 #include "CGValue.h"
21 #include "CodeGenModule.h"
22 #include "CodeGenPGO.h"
23 #include "EHScopeStack.h"
24 #include "VarBypassDetector.h"
25 #include "clang/AST/CharUnits.h"
26 #include "clang/AST/ExprCXX.h"
27 #include "clang/AST/ExprObjC.h"
28 #include "clang/AST/ExprOpenMP.h"
29 #include "clang/AST/Type.h"
30 #include "clang/Basic/ABI.h"
33 #include "clang/Basic/TargetInfo.h"
35 #include "llvm/ADT/ArrayRef.h"
36 #include "llvm/ADT/DenseMap.h"
37 #include "llvm/ADT/MapVector.h"
38 #include "llvm/ADT/SmallVector.h"
39 #include "llvm/IR/ValueHandle.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Transforms/Utils/SanitizerStats.h"
42 
43 namespace llvm {
44 class BasicBlock;
45 class LLVMContext;
46 class MDNode;
47 class Module;
48 class SwitchInst;
49 class Twine;
50 class Value;
51 class CallSite;
52 }
53 
54 namespace clang {
55 class ASTContext;
56 class BlockDecl;
57 class CXXDestructorDecl;
58 class CXXForRangeStmt;
59 class CXXTryStmt;
60 class Decl;
61 class LabelDecl;
62 class EnumConstantDecl;
63 class FunctionDecl;
64 class FunctionProtoType;
65 class LabelStmt;
66 class ObjCContainerDecl;
67 class ObjCInterfaceDecl;
68 class ObjCIvarDecl;
69 class ObjCMethodDecl;
70 class ObjCImplementationDecl;
71 class ObjCPropertyImplDecl;
72 class TargetInfo;
73 class VarDecl;
74 class ObjCForCollectionStmt;
75 class ObjCAtTryStmt;
76 class ObjCAtThrowStmt;
77 class ObjCAtSynchronizedStmt;
78 class ObjCAutoreleasePoolStmt;
79 
80 namespace analyze_os_log {
81 class OSLogBufferLayout;
82 }
83 
84 namespace CodeGen {
85 class CodeGenTypes;
86 class CGCallee;
87 class CGFunctionInfo;
88 class CGRecordLayout;
89 class CGBlockInfo;
90 class CGCXXABI;
91 class BlockByrefHelpers;
92 class BlockByrefInfo;
93 class BlockFlags;
94 class BlockFieldFlags;
95 class RegionCodeGenTy;
96 class TargetCodeGenInfo;
97 struct OMPTaskDataTy;
98 struct CGCoroData;
99 
100 /// The kind of evaluation to perform on values of a particular
101 /// type. Basically, is the code in CGExprScalar, CGExprComplex, or
102 /// CGExprAgg?
103 ///
104 /// TODO: should vectors maybe be split out into their own thing?
109 };
110 
111 #define LIST_SANITIZER_CHECKS \
112  SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
113  SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
114  SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
115  SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
116  SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
117  SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
118  SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
119  SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
120  SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
121  SANITIZER_CHECK(MissingReturn, missing_return, 0) \
122  SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
123  SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
124  SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
125  SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
126  SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
127  SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
128  SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
129  SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
130  SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
131  SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
132  SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
133  SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
134 
136 #define SANITIZER_CHECK(Enum, Name, Version) Enum,
138 #undef SANITIZER_CHECK
139 };
140 
141 /// CodeGenFunction - This class organizes the per-function state that is used
142 /// while generating LLVM code.
144  CodeGenFunction(const CodeGenFunction &) = delete;
145  void operator=(const CodeGenFunction &) = delete;
146 
147  friend class CGCXXABI;
148 public:
149  /// A jump destination is an abstract label, branching to which may
150  /// require a jump out through normal cleanups.
151  struct JumpDest {
152  JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
153  JumpDest(llvm::BasicBlock *Block,
155  unsigned Index)
156  : Block(Block), ScopeDepth(Depth), Index(Index) {}
157 
158  bool isValid() const { return Block != nullptr; }
159  llvm::BasicBlock *getBlock() const { return Block; }
160  EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
161  unsigned getDestIndex() const { return Index; }
162 
163  // This should be used cautiously.
165  ScopeDepth = depth;
166  }
167 
168  private:
169  llvm::BasicBlock *Block;
171  unsigned Index;
172  };
173 
174  CodeGenModule &CGM; // Per-module state.
176 
177  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
180 
181  // Stores variables for which we can't generate correct lifetime markers
182  // because of jumps.
184 
185  // CodeGen lambda for loops and support for ordered clause
186  typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
187  JumpDest)>
189  typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
190  const unsigned, const bool)>
192 
193  // Codegen lambda for loop bounds in worksharing loop constructs
194  typedef llvm::function_ref<std::pair<LValue, LValue>(
197 
198  // Codegen lambda for loop bounds in dispatch-based loop implementation
199  typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
200  CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
201  Address UB)>
203 
204  /// CGBuilder insert helper. This function is called after an
205  /// instruction is created using Builder.
206  void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
207  llvm::BasicBlock *BB,
208  llvm::BasicBlock::iterator InsertPt) const;
209 
210  /// CurFuncDecl - Holds the Decl for the current outermost
211  /// non-closure context.
213  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
217  llvm::Function *CurFn = nullptr;
218 
219  // Holds coroutine data if the current function is a coroutine. We use a
220  // wrapper to manage its lifetime, so that we don't have to define CGCoroData
221  // in this header.
222  struct CGCoroInfo {
223  std::unique_ptr<CGCoroData> Data;
224  CGCoroInfo();
225  ~CGCoroInfo();
226  };
228 
229  bool isCoroutine() const {
230  return CurCoro.Data != nullptr;
231  }
232 
233  /// CurGD - The GlobalDecl for the current function being compiled.
235 
236  /// PrologueCleanupDepth - The cleanup depth enclosing all the
237  /// cleanups associated with the parameters.
239 
240  /// ReturnBlock - Unified return block.
242 
243  /// ReturnValue - The temporary alloca to hold the return
244  /// value. This is invalid iff the function has no return value.
245  Address ReturnValue = Address::invalid();
246 
247  /// Return true if a label was seen in the current scope.
249  if (CurLexicalScope)
250  return CurLexicalScope->hasLabels();
251  return !LabelMap.empty();
252  }
253 
254  /// AllocaInsertPoint - This is an instruction in the entry block before which
255  /// we prefer to insert allocas.
256  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
257 
258  /// API for captured statement code generation.
260  public:
262  : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
263  explicit CGCapturedStmtInfo(const CapturedStmt &S,
265  : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
266 
270  E = S.capture_end();
271  I != E; ++I, ++Field) {
272  if (I->capturesThis())
273  CXXThisFieldDecl = *Field;
274  else if (I->capturesVariable())
275  CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
276  else if (I->capturesVariableByCopy())
277  CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
278  }
279  }
280 
281  virtual ~CGCapturedStmtInfo();
282 
283  CapturedRegionKind getKind() const { return Kind; }
284 
285  virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
286  // Retrieve the value of the context parameter.
287  virtual llvm::Value *getContextValue() const { return ThisValue; }
288 
289  /// Lookup the captured field decl for a variable.
290  virtual const FieldDecl *lookup(const VarDecl *VD) const {
291  return CaptureFields.lookup(VD->getCanonicalDecl());
292  }
293 
294  bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
295  virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
296 
297  static bool classof(const CGCapturedStmtInfo *) {
298  return true;
299  }
300 
301  /// Emit the captured statement body.
302  virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
304  CGF.EmitStmt(S);
305  }
306 
307  /// Get the name of the capture helper.
308  virtual StringRef getHelperName() const { return "__captured_stmt"; }
309 
310  private:
311  /// The kind of captured statement being generated.
313 
314  /// Keep the map between VarDecl and FieldDecl.
315  llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
316 
317  /// The base address of the captured record, passed in as the first
318  /// argument of the parallel region function.
319  llvm::Value *ThisValue;
320 
321  /// Captured 'this' type.
322  FieldDecl *CXXThisFieldDecl;
323  };
324  CGCapturedStmtInfo *CapturedStmtInfo = nullptr;
325 
326  /// RAII for correct setting/restoring of CapturedStmtInfo.
328  private:
329  CodeGenFunction &CGF;
330  CGCapturedStmtInfo *PrevCapturedStmtInfo;
331  public:
332  CGCapturedStmtRAII(CodeGenFunction &CGF,
333  CGCapturedStmtInfo *NewCapturedStmtInfo)
334  : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
335  CGF.CapturedStmtInfo = NewCapturedStmtInfo;
336  }
337  ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
338  };
339 
340  /// An abstract representation of regular/ObjC call/message targets.
342  /// The function declaration of the callee.
343  const Decl *CalleeDecl;
344 
345  public:
346  AbstractCallee() : CalleeDecl(nullptr) {}
347  AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
348  AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
349  bool hasFunctionDecl() const {
350  return dyn_cast_or_null<FunctionDecl>(CalleeDecl);
351  }
352  const Decl *getDecl() const { return CalleeDecl; }
353  unsigned getNumParams() const {
354  if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
355  return FD->getNumParams();
356  return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
357  }
358  const ParmVarDecl *getParamDecl(unsigned I) const {
359  if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
360  return FD->getParamDecl(I);
361  return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
362  }
363  };
364 
365  /// Sanitizers enabled for this function.
367 
368  /// True if CodeGen currently emits code implementing sanitizer checks.
369  bool IsSanitizerScope = false;
370 
371  /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
373  CodeGenFunction *CGF;
374  public:
375  SanitizerScope(CodeGenFunction *CGF);
376  ~SanitizerScope();
377  };
378 
379  /// In C++, whether we are code generating a thunk. This controls whether we
380  /// should emit cleanups.
381  bool CurFuncIsThunk = false;
382 
383  /// In ARC, whether we should autorelease the return value.
384  bool AutoreleaseResult = false;
385 
386  /// Whether we processed a Microsoft-style asm block during CodeGen. These can
387  /// potentially set the return value.
388  bool SawAsmBlock = false;
389 
390  const FunctionDecl *CurSEHParent = nullptr;
391 
392  /// True if the current function is an outlined SEH helper. This can be a
393  /// finally block or filter expression.
394  bool IsOutlinedSEHHelper = false;
395 
396  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
397  llvm::Value *BlockPointer = nullptr;
398 
399  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
400  FieldDecl *LambdaThisCaptureField = nullptr;
401 
402  /// A mapping from NRVO variables to the flags used to indicate
403  /// when the NRVO has been applied to this variable.
404  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
405 
409 
410  llvm::Instruction *CurrentFuncletPad = nullptr;
411 
412  class CallLifetimeEnd final : public EHScopeStack::Cleanup {
413  llvm::Value *Addr;
414  llvm::Value *Size;
415 
416  public:
418  : Addr(addr.getPointer()), Size(size) {}
419 
420  void Emit(CodeGenFunction &CGF, Flags flags) override {
421  CGF.EmitLifetimeEnd(Size, Addr);
422  }
423  };
424 
425  /// Header for data within LifetimeExtendedCleanupStack.
427  /// The size of the following cleanup object.
428  unsigned Size;
429  /// The kind of cleanup to push: a value from the CleanupKind enumeration.
431 
432  size_t getSize() const { return Size; }
433  CleanupKind getKind() const { return Kind; }
434  };
435 
436  /// i32s containing the indexes of the cleanup destinations.
437  Address NormalCleanupDest = Address::invalid();
438 
439  unsigned NextCleanupDestIndex = 1;
440 
441  /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
442  CGBlockInfo *FirstBlockInfo = nullptr;
443 
444  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
445  llvm::BasicBlock *EHResumeBlock = nullptr;
446 
447  /// The exception slot. All landing pads write the current exception pointer
448  /// into this alloca.
449  llvm::Value *ExceptionSlot = nullptr;
450 
451  /// The selector slot. Under the MandatoryCleanup model, all landing pads
452  /// write the current selector value into this alloca.
453  llvm::AllocaInst *EHSelectorSlot = nullptr;
454 
455  /// A stack of exception code slots. Entering an __except block pushes a slot
456  /// on the stack and leaving pops one. The __exception_code() intrinsic loads
457  /// a value from the top of the stack.
459 
460  /// Value returned by __exception_info intrinsic.
461  llvm::Value *SEHInfo = nullptr;
462 
463  /// Emits a landing pad for the current EH stack.
464  llvm::BasicBlock *EmitLandingPad();
465 
466  llvm::BasicBlock *getInvokeDestImpl();
467 
468  template <class T>
470  return DominatingValue<T>::save(*this, value);
471  }
472 
473 public:
474  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
475  /// rethrows.
477 
478  /// A class controlling the emission of a finally block.
479  class FinallyInfo {
480  /// Where the catchall's edge through the cleanup should go.
481  JumpDest RethrowDest;
482 
483  /// A function to call to enter the catch.
484  llvm::Constant *BeginCatchFn;
485 
486  /// An i1 variable indicating whether or not the @finally is
487  /// running for an exception.
488  llvm::AllocaInst *ForEHVar;
489 
490  /// An i8* variable into which the exception pointer to rethrow
491  /// has been saved.
492  llvm::AllocaInst *SavedExnVar;
493 
494  public:
495  void enter(CodeGenFunction &CGF, const Stmt *Finally,
496  llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
497  llvm::Constant *rethrowFn);
498  void exit(CodeGenFunction &CGF);
499  };
500 
501  /// Returns true inside SEH __try blocks.
502  bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
503 
504  /// Returns true while emitting a cleanuppad.
505  bool isCleanupPadScope() const {
506  return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
507  }
508 
509  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
510  /// current full-expression. Safe against the possibility that
511  /// we're currently inside a conditionally-evaluated expression.
512  template <class T, class... As>
514  // If we're not in a conditional branch, or if none of the
515  // arguments requires saving, then use the unconditional cleanup.
516  if (!isInConditionalBranch())
517  return EHStack.pushCleanup<T>(kind, A...);
518 
519  // Stash values in a tuple so we can guarantee the order of saves.
520  typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
521  SavedTuple Saved{saveValueInCond(A)...};
522 
523  typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
524  EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
525  initFullExprCleanup();
526  }
527 
528  /// Queue a cleanup to be pushed after finishing the current
529  /// full-expression.
530  template <class T, class... As>
532  assert(!isInConditionalBranch() && "can't defer conditional cleanup");
533 
534  LifetimeExtendedCleanupHeader Header = { sizeof(T), Kind };
535 
536  size_t OldSize = LifetimeExtendedCleanupStack.size();
537  LifetimeExtendedCleanupStack.resize(
538  LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size);
539 
540  static_assert(sizeof(Header) % alignof(T) == 0,
541  "Cleanup will be allocated on misaligned address");
542  char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
543  new (Buffer) LifetimeExtendedCleanupHeader(Header);
544  new (Buffer + sizeof(Header)) T(A...);
545  }
546 
547  /// Set up the last cleaup that was pushed as a conditional
548  /// full-expression cleanup.
549  void initFullExprCleanup();
550 
551  /// PushDestructorCleanup - Push a cleanup to call the
552  /// complete-object destructor of an object of the given type at the
553  /// given address. Does nothing if T is not a C++ class type with a
554  /// non-trivial destructor.
555  void PushDestructorCleanup(QualType T, Address Addr);
556 
557  /// PushDestructorCleanup - Push a cleanup to call the
558  /// complete-object variant of the given destructor on the object at
559  /// the given address.
560  void PushDestructorCleanup(const CXXDestructorDecl *Dtor, Address Addr);
561 
562  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
563  /// process all branch fixups.
564  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
565 
566  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
567  /// The block cannot be reactivated. Pops it if it's the top of the
568  /// stack.
569  ///
570  /// \param DominatingIP - An instruction which is known to
571  /// dominate the current IP (if set) and which lies along
572  /// all paths of execution between the current IP and the
573  /// the point at which the cleanup comes into scope.
574  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
575  llvm::Instruction *DominatingIP);
576 
577  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
578  /// Cannot be used to resurrect a deactivated cleanup.
579  ///
580  /// \param DominatingIP - An instruction which is known to
581  /// dominate the current IP (if set) and which lies along
582  /// all paths of execution between the current IP and the
583  /// the point at which the cleanup comes into scope.
584  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
585  llvm::Instruction *DominatingIP);
586 
587  /// Enters a new scope for capturing cleanups, all of which
588  /// will be executed once the scope is exited.
590  EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
591  size_t LifetimeExtendedCleanupStackSize;
592  bool OldDidCallStackSave;
593  protected:
595  private:
596 
597  RunCleanupsScope(const RunCleanupsScope &) = delete;
598  void operator=(const RunCleanupsScope &) = delete;
599 
600  protected:
601  CodeGenFunction& CGF;
602 
603  public:
604  /// Enter a new cleanup scope.
605  explicit RunCleanupsScope(CodeGenFunction &CGF)
606  : PerformCleanup(true), CGF(CGF)
607  {
608  CleanupStackDepth = CGF.EHStack.stable_begin();
609  LifetimeExtendedCleanupStackSize =
610  CGF.LifetimeExtendedCleanupStack.size();
611  OldDidCallStackSave = CGF.DidCallStackSave;
612  CGF.DidCallStackSave = false;
613  OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
614  CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
615  }
616 
617  /// Exit this cleanup scope, emitting any accumulated cleanups.
619  if (PerformCleanup)
620  ForceCleanup();
621  }
622 
623  /// Determine whether this scope requires any cleanups.
624  bool requiresCleanups() const {
625  return CGF.EHStack.stable_begin() != CleanupStackDepth;
626  }
627 
628  /// Force the emission of cleanups now, instead of waiting
629  /// until this object is destroyed.
630  /// \param ValuesToReload - A list of values that need to be available at
631  /// the insertion point after cleanup emission. If cleanup emission created
632  /// a shared cleanup block, these value pointers will be rewritten.
633  /// Otherwise, they not will be modified.
634  void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
635  assert(PerformCleanup && "Already forced cleanup");
636  CGF.DidCallStackSave = OldDidCallStackSave;
637  CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
638  ValuesToReload);
639  PerformCleanup = false;
640  CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
641  }
642  };
643 
644  // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
645  EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
646  EHScopeStack::stable_end();
647 
649  SourceRange Range;
651  LexicalScope *ParentScope;
652 
653  LexicalScope(const LexicalScope &) = delete;
654  void operator=(const LexicalScope &) = delete;
655 
656  public:
657  /// Enter a new cleanup scope.
658  explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
659  : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
660  CGF.CurLexicalScope = this;
661  if (CGDebugInfo *DI = CGF.getDebugInfo())
662  DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
663  }
664 
665  void addLabel(const LabelDecl *label) {
666  assert(PerformCleanup && "adding label to dead scope?");
667  Labels.push_back(label);
668  }
669 
670  /// Exit this cleanup scope, emitting any accumulated
671  /// cleanups.
673  if (CGDebugInfo *DI = CGF.getDebugInfo())
674  DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
675 
676  // If we should perform a cleanup, force them now. Note that
677  // this ends the cleanup scope before rescoping any labels.
678  if (PerformCleanup) {
679  ApplyDebugLocation DL(CGF, Range.getEnd());
680  ForceCleanup();
681  }
682  }
683 
684  /// Force the emission of cleanups now, instead of waiting
685  /// until this object is destroyed.
686  void ForceCleanup() {
687  CGF.CurLexicalScope = ParentScope;
688  RunCleanupsScope::ForceCleanup();
689 
690  if (!Labels.empty())
691  rescopeLabels();
692  }
693 
694  bool hasLabels() const {
695  return !Labels.empty();
696  }
697 
698  void rescopeLabels();
699  };
700 
701  typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
702 
703  /// The class used to assign some variables some temporarily addresses.
704  class OMPMapVars {
705  DeclMapTy SavedLocals;
706  DeclMapTy SavedTempAddresses;
707  OMPMapVars(const OMPMapVars &) = delete;
708  void operator=(const OMPMapVars &) = delete;
709 
710  public:
711  explicit OMPMapVars() = default;
713  assert(SavedLocals.empty() && "Did not restored original addresses.");
714  };
715 
716  /// Sets the address of the variable \p LocalVD to be \p TempAddr in
717  /// function \p CGF.
718  /// \return true if at least one variable was set already, false otherwise.
719  bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
720  Address TempAddr) {
721  LocalVD = LocalVD->getCanonicalDecl();
722  // Only save it once.
723  if (SavedLocals.count(LocalVD)) return false;
724 
725  // Copy the existing local entry to SavedLocals.
726  auto it = CGF.LocalDeclMap.find(LocalVD);
727  if (it != CGF.LocalDeclMap.end())
728  SavedLocals.try_emplace(LocalVD, it->second);
729  else
730  SavedLocals.try_emplace(LocalVD, Address::invalid());
731 
732  // Generate the private entry.
733  QualType VarTy = LocalVD->getType();
734  if (VarTy->isReferenceType()) {
735  Address Temp = CGF.CreateMemTemp(VarTy);
736  CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
737  TempAddr = Temp;
738  }
739  SavedTempAddresses.try_emplace(LocalVD, TempAddr);
740 
741  return true;
742  }
743 
744  /// Applies new addresses to the list of the variables.
745  /// \return true if at least one variable is using new address, false
746  /// otherwise.
747  bool apply(CodeGenFunction &CGF) {
748  copyInto(SavedTempAddresses, CGF.LocalDeclMap);
749  SavedTempAddresses.clear();
750  return !SavedLocals.empty();
751  }
752 
753  /// Restores original addresses of the variables.
754  void restore(CodeGenFunction &CGF) {
755  if (!SavedLocals.empty()) {
756  copyInto(SavedLocals, CGF.LocalDeclMap);
757  SavedLocals.clear();
758  }
759  }
760 
761  private:
762  /// Copy all the entries in the source map over the corresponding
763  /// entries in the destination, which must exist.
764  static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
765  for (auto &Pair : Src) {
766  if (!Pair.second.isValid()) {
767  Dest.erase(Pair.first);
768  continue;
769  }
770 
771  auto I = Dest.find(Pair.first);
772  if (I != Dest.end())
773  I->second = Pair.second;
774  else
775  Dest.insert(Pair);
776  }
777  }
778  };
779 
780  /// The scope used to remap some variables as private in the OpenMP loop body
781  /// (or other captured region emitted without outlining), and to restore old
782  /// vars back on exit.
784  OMPMapVars MappedVars;
785  OMPPrivateScope(const OMPPrivateScope &) = delete;
786  void operator=(const OMPPrivateScope &) = delete;
787 
788  public:
789  /// Enter a new OpenMP private scope.
790  explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
791 
792  /// Registers \p LocalVD variable as a private and apply \p PrivateGen
793  /// function for it to generate corresponding private variable. \p
794  /// PrivateGen returns an address of the generated private variable.
795  /// \return true if the variable is registered as private, false if it has
796  /// been privatized already.
797  bool addPrivate(const VarDecl *LocalVD,
798  const llvm::function_ref<Address()> PrivateGen) {
799  assert(PerformCleanup && "adding private to dead scope");
800  return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
801  }
802 
803  /// Privatizes local variables previously registered as private.
804  /// Registration is separate from the actual privatization to allow
805  /// initializers use values of the original variables, not the private one.
806  /// This is important, for example, if the private variable is a class
807  /// variable initialized by a constructor that references other private
808  /// variables. But at initialization original variables must be used, not
809  /// private copies.
810  /// \return true if at least one variable was privatized, false otherwise.
811  bool Privatize() { return MappedVars.apply(CGF); }
812 
813  void ForceCleanup() {
814  RunCleanupsScope::ForceCleanup();
815  MappedVars.restore(CGF);
816  }
817 
818  /// Exit scope - all the mapped variables are restored.
820  if (PerformCleanup)
821  ForceCleanup();
822  }
823 
824  /// Checks if the global variable is captured in current function.
825  bool isGlobalVarCaptured(const VarDecl *VD) const {
826  VD = VD->getCanonicalDecl();
827  return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
828  }
829  };
830 
831  /// Takes the old cleanup stack size and emits the cleanup blocks
832  /// that have been added.
833  void
834  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
835  std::initializer_list<llvm::Value **> ValuesToReload = {});
836 
837  /// Takes the old cleanup stack size and emits the cleanup blocks
838  /// that have been added, then adds all lifetime-extended cleanups from
839  /// the given position to the stack.
840  void
841  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
842  size_t OldLifetimeExtendedStackSize,
843  std::initializer_list<llvm::Value **> ValuesToReload = {});
844 
845  void ResolveBranchFixups(llvm::BasicBlock *Target);
846 
847  /// The given basic block lies in the current EH scope, but may be a
848  /// target of a potentially scope-crossing jump; get a stable handle
849  /// to which we can perform this jump later.
850  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
851  return JumpDest(Target,
852  EHStack.getInnermostNormalCleanup(),
853  NextCleanupDestIndex++);
854  }
855 
856  /// The given basic block lies in the current EH scope, but may be a
857  /// target of a potentially scope-crossing jump; get a stable handle
858  /// to which we can perform this jump later.
859  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
860  return getJumpDestInCurrentScope(createBasicBlock(Name));
861  }
862 
863  /// EmitBranchThroughCleanup - Emit a branch from the current insert
864  /// block through the normal cleanup handling code (if any) and then
865  /// on to \arg Dest.
866  void EmitBranchThroughCleanup(JumpDest Dest);
867 
868  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
869  /// specified destination obviously has no cleanups to run. 'false' is always
870  /// a conservatively correct answer for this method.
871  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
872 
873  /// popCatchScope - Pops the catch scope at the top of the EHScope
874  /// stack, emitting any required code (other than the catch handlers
875  /// themselves).
876  void popCatchScope();
877 
878  llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
879  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
880  llvm::BasicBlock *
881  getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
882 
883  /// An object to manage conditionally-evaluated expressions.
885  llvm::BasicBlock *StartBB;
886 
887  public:
888  ConditionalEvaluation(CodeGenFunction &CGF)
889  : StartBB(CGF.Builder.GetInsertBlock()) {}
890 
891  void begin(CodeGenFunction &CGF) {
892  assert(CGF.OutermostConditional != this);
893  if (!CGF.OutermostConditional)
894  CGF.OutermostConditional = this;
895  }
896 
897  void end(CodeGenFunction &CGF) {
898  assert(CGF.OutermostConditional != nullptr);
899  if (CGF.OutermostConditional == this)
900  CGF.OutermostConditional = nullptr;
901  }
902 
903  /// Returns a block which will be executed prior to each
904  /// evaluation of the conditional code.
905  llvm::BasicBlock *getStartingBlock() const {
906  return StartBB;
907  }
908  };
909 
910  /// isInConditionalBranch - Return true if we're currently emitting
911  /// one branch or the other of a conditional expression.
912  bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
913 
915  assert(isInConditionalBranch());
916  llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
917  auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
918  store->setAlignment(addr.getAlignment().getQuantity());
919  }
920 
921  /// An RAII object to record that we're evaluating a statement
922  /// expression.
924  CodeGenFunction &CGF;
925 
926  /// We have to save the outermost conditional: cleanups in a
927  /// statement expression aren't conditional just because the
928  /// StmtExpr is.
929  ConditionalEvaluation *SavedOutermostConditional;
930 
931  public:
932  StmtExprEvaluation(CodeGenFunction &CGF)
933  : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
934  CGF.OutermostConditional = nullptr;
935  }
936 
938  CGF.OutermostConditional = SavedOutermostConditional;
939  CGF.EnsureInsertPoint();
940  }
941  };
942 
943  /// An object which temporarily prevents a value from being
944  /// destroyed by aggressive peephole optimizations that assume that
945  /// all uses of a value have been realized in the IR.
947  llvm::Instruction *Inst;
948  friend class CodeGenFunction;
949 
950  public:
951  PeepholeProtection() : Inst(nullptr) {}
952  };
953 
954  /// A non-RAII class containing all the information about a bound
955  /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
956  /// this which makes individual mappings very simple; using this
957  /// class directly is useful when you have a variable number of
958  /// opaque values or don't want the RAII functionality for some
959  /// reason.
961  const OpaqueValueExpr *OpaqueValue;
962  bool BoundLValue;
964 
966  bool boundLValue)
967  : OpaqueValue(ov), BoundLValue(boundLValue) {}
968  public:
969  OpaqueValueMappingData() : OpaqueValue(nullptr) {}
970 
971  static bool shouldBindAsLValue(const Expr *expr) {
972  // gl-values should be bound as l-values for obvious reasons.
973  // Records should be bound as l-values because IR generation
974  // always keeps them in memory. Expressions of function type
975  // act exactly like l-values but are formally required to be
976  // r-values in C.
977  return expr->isGLValue() ||
978  expr->getType()->isFunctionType() ||
979  hasAggregateEvaluationKind(expr->getType());
980  }
981 
982  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
983  const OpaqueValueExpr *ov,
984  const Expr *e) {
985  if (shouldBindAsLValue(ov))
986  return bind(CGF, ov, CGF.EmitLValue(e));
987  return bind(CGF, ov, CGF.EmitAnyExpr(e));
988  }
989 
990  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
991  const OpaqueValueExpr *ov,
992  const LValue &lv) {
993  assert(shouldBindAsLValue(ov));
994  CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
995  return OpaqueValueMappingData(ov, true);
996  }
997 
998  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
999  const OpaqueValueExpr *ov,
1000  const RValue &rv) {
1001  assert(!shouldBindAsLValue(ov));
1002  CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1003 
1004  OpaqueValueMappingData data(ov, false);
1005 
1006  // Work around an extremely aggressive peephole optimization in
1007  // EmitScalarConversion which assumes that all other uses of a
1008  // value are extant.
1009  data.Protection = CGF.protectFromPeepholes(rv);
1010 
1011  return data;
1012  }
1013 
1014  bool isValid() const { return OpaqueValue != nullptr; }
1015  void clear() { OpaqueValue = nullptr; }
1016 
1017  void unbind(CodeGenFunction &CGF) {
1018  assert(OpaqueValue && "no data to unbind!");
1019 
1020  if (BoundLValue) {
1021  CGF.OpaqueLValues.erase(OpaqueValue);
1022  } else {
1023  CGF.OpaqueRValues.erase(OpaqueValue);
1024  CGF.unprotectFromPeepholes(Protection);
1025  }
1026  }
1027  };
1028 
1029  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1031  CodeGenFunction &CGF;
1033 
1034  public:
1035  static bool shouldBindAsLValue(const Expr *expr) {
1036  return OpaqueValueMappingData::shouldBindAsLValue(expr);
1037  }
1038 
1039  /// Build the opaque value mapping for the given conditional
1040  /// operator if it's the GNU ?: extension. This is a common
1041  /// enough pattern that the convenience operator is really
1042  /// helpful.
1043  ///
1044  OpaqueValueMapping(CodeGenFunction &CGF,
1045  const AbstractConditionalOperator *op) : CGF(CGF) {
1046  if (isa<ConditionalOperator>(op))
1047  // Leave Data empty.
1048  return;
1049 
1050  const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1051  Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1052  e->getCommon());
1053  }
1054 
1055  /// Build the opaque value mapping for an OpaqueValueExpr whose source
1056  /// expression is set to the expression the OVE represents.
1057  OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
1058  : CGF(CGF) {
1059  if (OV) {
1060  assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1061  "for OVE with no source expression");
1062  Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
1063  }
1064  }
1065 
1066  OpaqueValueMapping(CodeGenFunction &CGF,
1067  const OpaqueValueExpr *opaqueValue,
1068  LValue lvalue)
1069  : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1070  }
1071 
1072  OpaqueValueMapping(CodeGenFunction &CGF,
1073  const OpaqueValueExpr *opaqueValue,
1074  RValue rvalue)
1075  : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1076  }
1077 
1078  void pop() {
1079  Data.unbind(CGF);
1080  Data.clear();
1081  }
1082 
1084  if (Data.isValid()) Data.unbind(CGF);
1085  }
1086  };
1087 
1088 private:
1089  CGDebugInfo *DebugInfo;
1090  bool DisableDebugInfo = false;
1091 
1092  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1093  /// calling llvm.stacksave for multiple VLAs in the same scope.
1094  bool DidCallStackSave = false;
1095 
1096  /// IndirectBranch - The first time an indirect goto is seen we create a block
1097  /// with an indirect branch. Every time we see the address of a label taken,
1098  /// we add the label to the indirect goto. Every subsequent indirect goto is
1099  /// codegen'd as a jump to the IndirectBranch's basic block.
1100  llvm::IndirectBrInst *IndirectBranch = nullptr;
1101 
1102  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1103  /// decls.
1104  DeclMapTy LocalDeclMap;
1105 
1106  // Keep track of the cleanups for callee-destructed parameters pushed to the
1107  // cleanup stack so that they can be deactivated later.
1108  llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1109  CalleeDestructedParamCleanups;
1110 
1111  /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1112  /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1113  /// parameter.
1114  llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1115  SizeArguments;
1116 
1117  /// Track escaped local variables with auto storage. Used during SEH
1118  /// outlining to produce a call to llvm.localescape.
1119  llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1120 
1121  /// LabelMap - This keeps track of the LLVM basic block for each C label.
1122  llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1123 
1124  // BreakContinueStack - This keeps track of where break and continue
1125  // statements should jump to.
1126  struct BreakContinue {
1127  BreakContinue(JumpDest Break, JumpDest Continue)
1128  : BreakBlock(Break), ContinueBlock(Continue) {}
1129 
1130  JumpDest BreakBlock;
1131  JumpDest ContinueBlock;
1132  };
1133  SmallVector<BreakContinue, 8> BreakContinueStack;
1134 
1135  /// Handles cancellation exit points in OpenMP-related constructs.
1136  class OpenMPCancelExitStack {
1137  /// Tracks cancellation exit point and join point for cancel-related exit
1138  /// and normal exit.
1139  struct CancelExit {
1140  CancelExit() = default;
1141  CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1142  JumpDest ContBlock)
1143  : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1145  /// true if the exit block has been emitted already by the special
1146  /// emitExit() call, false if the default codegen is used.
1147  bool HasBeenEmitted = false;
1148  JumpDest ExitBlock;
1149  JumpDest ContBlock;
1150  };
1151 
1153 
1154  public:
1155  OpenMPCancelExitStack() : Stack(1) {}
1156  ~OpenMPCancelExitStack() = default;
1157  /// Fetches the exit block for the current OpenMP construct.
1158  JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1159  /// Emits exit block with special codegen procedure specific for the related
1160  /// OpenMP construct + emits code for normal construct cleanup.
1161  void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1162  const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1163  if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1164  assert(CGF.getOMPCancelDestination(Kind).isValid());
1165  assert(CGF.HaveInsertPoint());
1166  assert(!Stack.back().HasBeenEmitted);
1167  auto IP = CGF.Builder.saveAndClearIP();
1168  CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1169  CodeGen(CGF);
1170  CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1171  CGF.Builder.restoreIP(IP);
1172  Stack.back().HasBeenEmitted = true;
1173  }
1174  CodeGen(CGF);
1175  }
1176  /// Enter the cancel supporting \a Kind construct.
1177  /// \param Kind OpenMP directive that supports cancel constructs.
1178  /// \param HasCancel true, if the construct has inner cancel directive,
1179  /// false otherwise.
1180  void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
1181  Stack.push_back({Kind,
1182  HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
1183  : JumpDest(),
1184  HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
1185  : JumpDest()});
1186  }
1187  /// Emits default exit point for the cancel construct (if the special one
1188  /// has not be used) + join point for cancel/normal exits.
1189  void exit(CodeGenFunction &CGF) {
1190  if (getExitBlock().isValid()) {
1191  assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
1192  bool HaveIP = CGF.HaveInsertPoint();
1193  if (!Stack.back().HasBeenEmitted) {
1194  if (HaveIP)
1195  CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1196  CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1197  CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1198  }
1199  CGF.EmitBlock(Stack.back().ContBlock.getBlock());
1200  if (!HaveIP) {
1201  CGF.Builder.CreateUnreachable();
1202  CGF.Builder.ClearInsertionPoint();
1203  }
1204  }
1205  Stack.pop_back();
1206  }
1207  };
1208  OpenMPCancelExitStack OMPCancelStack;
1209 
1210  CodeGenPGO PGO;
1211 
1212  /// Calculate branch weights appropriate for PGO data
1213  llvm::MDNode *createProfileWeights(uint64_t TrueCount, uint64_t FalseCount);
1214  llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights);
1215  llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1216  uint64_t LoopCount);
1217 
1218 public:
1219  /// Increment the profiler's counter for the given statement by \p StepV.
1220  /// If \p StepV is null, the default increment is 1.
1221  void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1223  PGO.emitCounterIncrement(Builder, S, StepV);
1224  PGO.setCurrentStmt(S);
1225  }
1226 
1227  /// Get the profiler's count for the given statement.
1228  uint64_t getProfileCount(const Stmt *S) {
1229  Optional<uint64_t> Count = PGO.getStmtCount(S);
1230  if (!Count.hasValue())
1231  return 0;
1232  return *Count;
1233  }
1234 
1235  /// Set the profiler's current count.
1236  void setCurrentProfileCount(uint64_t Count) {
1237  PGO.setCurrentRegionCount(Count);
1238  }
1239 
1240  /// Get the profiler's current count. This is generally the count for the most
1241  /// recently incremented counter.
1243  return PGO.getCurrentRegionCount();
1244  }
1245 
1246 private:
1247 
1248  /// SwitchInsn - This is nearest current switch instruction. It is null if
1249  /// current context is not in a switch.
1250  llvm::SwitchInst *SwitchInsn = nullptr;
1251  /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1252  SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1253 
1254  /// CaseRangeBlock - This block holds if condition check for last case
1255  /// statement range in current switch instruction.
1256  llvm::BasicBlock *CaseRangeBlock = nullptr;
1257 
1258  /// OpaqueLValues - Keeps track of the current set of opaque value
1259  /// expressions.
1260  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1261  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1262 
1263  // VLASizeMap - This keeps track of the associated size for each VLA type.
1264  // We track this by the size expression rather than the type itself because
1265  // in certain situations, like a const qualifier applied to an VLA typedef,
1266  // multiple VLA types can share the same size expression.
1267  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1268  // enter/leave scopes.
1269  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1270 
1271  /// A block containing a single 'unreachable' instruction. Created
1272  /// lazily by getUnreachableBlock().
1273  llvm::BasicBlock *UnreachableBlock = nullptr;
1274 
1275  /// Counts of the number return expressions in the function.
1276  unsigned NumReturnExprs = 0;
1277 
1278  /// Count the number of simple (constant) return expressions in the function.
1279  unsigned NumSimpleReturnExprs = 0;
1280 
1281  /// The last regular (non-return) debug location (breakpoint) in the function.
1282  SourceLocation LastStopPoint;
1283 
1284 public:
1285  /// A scope within which we are constructing the fields of an object which
1286  /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1287  /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1289  public:
1290  FieldConstructionScope(CodeGenFunction &CGF, Address This)
1291  : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1292  CGF.CXXDefaultInitExprThis = This;
1293  }
1295  CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1296  }
1297 
1298  private:
1299  CodeGenFunction &CGF;
1300  Address OldCXXDefaultInitExprThis;
1301  };
1302 
1303  /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1304  /// is overridden to be the object under construction.
1306  public:
1307  CXXDefaultInitExprScope(CodeGenFunction &CGF)
1308  : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1309  OldCXXThisAlignment(CGF.CXXThisAlignment) {
1310  CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
1311  CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1312  }
1314  CGF.CXXThisValue = OldCXXThisValue;
1315  CGF.CXXThisAlignment = OldCXXThisAlignment;
1316  }
1317 
1318  public:
1319  CodeGenFunction &CGF;
1322  };
1323 
1324  /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1325  /// current loop index is overridden.
1327  public:
1328  ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1329  : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1330  CGF.ArrayInitIndex = Index;
1331  }
1333  CGF.ArrayInitIndex = OldArrayInitIndex;
1334  }
1335 
1336  private:
1337  CodeGenFunction &CGF;
1338  llvm::Value *OldArrayInitIndex;
1339  };
1340 
1342  public:
1344  : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1345  OldCurCodeDecl(CGF.CurCodeDecl),
1346  OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1347  OldCXXABIThisValue(CGF.CXXABIThisValue),
1348  OldCXXThisValue(CGF.CXXThisValue),
1349  OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1350  OldCXXThisAlignment(CGF.CXXThisAlignment),
1351  OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1352  OldCXXInheritedCtorInitExprArgs(
1353  std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1354  CGF.CurGD = GD;
1355  CGF.CurFuncDecl = CGF.CurCodeDecl =
1356  cast<CXXConstructorDecl>(GD.getDecl());
1357  CGF.CXXABIThisDecl = nullptr;
1358  CGF.CXXABIThisValue = nullptr;
1359  CGF.CXXThisValue = nullptr;
1360  CGF.CXXABIThisAlignment = CharUnits();
1361  CGF.CXXThisAlignment = CharUnits();
1362  CGF.ReturnValue = Address::invalid();
1363  CGF.FnRetTy = QualType();
1364  CGF.CXXInheritedCtorInitExprArgs.clear();
1365  }
1367  CGF.CurGD = OldCurGD;
1368  CGF.CurFuncDecl = OldCurFuncDecl;
1369  CGF.CurCodeDecl = OldCurCodeDecl;
1370  CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1371  CGF.CXXABIThisValue = OldCXXABIThisValue;
1372  CGF.CXXThisValue = OldCXXThisValue;
1373  CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1374  CGF.CXXThisAlignment = OldCXXThisAlignment;
1375  CGF.ReturnValue = OldReturnValue;
1376  CGF.FnRetTy = OldFnRetTy;
1377  CGF.CXXInheritedCtorInitExprArgs =
1378  std::move(OldCXXInheritedCtorInitExprArgs);
1379  }
1380 
1381  private:
1382  CodeGenFunction &CGF;
1383  GlobalDecl OldCurGD;
1384  const Decl *OldCurFuncDecl;
1385  const Decl *OldCurCodeDecl;
1386  ImplicitParamDecl *OldCXXABIThisDecl;
1387  llvm::Value *OldCXXABIThisValue;
1388  llvm::Value *OldCXXThisValue;
1389  CharUnits OldCXXABIThisAlignment;
1390  CharUnits OldCXXThisAlignment;
1391  Address OldReturnValue;
1392  QualType OldFnRetTy;
1393  CallArgList OldCXXInheritedCtorInitExprArgs;
1394  };
1395 
1396 private:
1397  /// CXXThisDecl - When generating code for a C++ member function,
1398  /// this will hold the implicit 'this' declaration.
1399  ImplicitParamDecl *CXXABIThisDecl = nullptr;
1400  llvm::Value *CXXABIThisValue = nullptr;
1401  llvm::Value *CXXThisValue = nullptr;
1402  CharUnits CXXABIThisAlignment;
1403  CharUnits CXXThisAlignment;
1404 
1405  /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
1406  /// this expression.
1407  Address CXXDefaultInitExprThis = Address::invalid();
1408 
1409  /// The current array initialization index when evaluating an
1410  /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
1411  llvm::Value *ArrayInitIndex = nullptr;
1412 
1413  /// The values of function arguments to use when evaluating
1414  /// CXXInheritedCtorInitExprs within this context.
1415  CallArgList CXXInheritedCtorInitExprArgs;
1416 
1417  /// CXXStructorImplicitParamDecl - When generating code for a constructor or
1418  /// destructor, this will hold the implicit argument (e.g. VTT).
1419  ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
1420  llvm::Value *CXXStructorImplicitParamValue = nullptr;
1421 
1422  /// OutermostConditional - Points to the outermost active
1423  /// conditional control. This is used so that we know if a
1424  /// temporary should be destroyed conditionally.
1425  ConditionalEvaluation *OutermostConditional = nullptr;
1426 
1427  /// The current lexical scope.
1428  LexicalScope *CurLexicalScope = nullptr;
1429 
1430  /// The current source location that should be used for exception
1431  /// handling code.
1432  SourceLocation CurEHLocation;
1433 
1434  /// BlockByrefInfos - For each __block variable, contains
1435  /// information about the layout of the variable.
1436  llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
1437 
1438  /// Used by -fsanitize=nullability-return to determine whether the return
1439  /// value can be checked.
1440  llvm::Value *RetValNullabilityPrecondition = nullptr;
1441 
1442  /// Check if -fsanitize=nullability-return instrumentation is required for
1443  /// this function.
1444  bool requiresReturnValueNullabilityCheck() const {
1445  return RetValNullabilityPrecondition;
1446  }
1447 
1448  /// Used to store precise source locations for return statements by the
1449  /// runtime return value checks.
1450  Address ReturnLocation = Address::invalid();
1451 
1452  /// Check if the return value of this function requires sanitization.
1453  bool requiresReturnValueCheck() const {
1454  return requiresReturnValueNullabilityCheck() ||
1455  (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
1456  CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>());
1457  }
1458 
1459  llvm::BasicBlock *TerminateLandingPad = nullptr;
1460  llvm::BasicBlock *TerminateHandler = nullptr;
1461  llvm::BasicBlock *TrapBB = nullptr;
1462 
1463  /// Terminate funclets keyed by parent funclet pad.
1464  llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
1465 
1466  /// Largest vector width used in ths function. Will be used to create a
1467  /// function attribute.
1468  unsigned LargestVectorWidth = 0;
1469 
1470  /// True if we need emit the life-time markers.
1471  const bool ShouldEmitLifetimeMarkers;
1472 
1473  /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
1474  /// the function metadata.
1475  void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
1476  llvm::Function *Fn);
1477 
1478 public:
1479  CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
1480  ~CodeGenFunction();
1481 
1482  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1483  ASTContext &getContext() const { return CGM.getContext(); }
1485  if (DisableDebugInfo)
1486  return nullptr;
1487  return DebugInfo;
1488  }
1489  void disableDebugInfo() { DisableDebugInfo = true; }
1490  void enableDebugInfo() { DisableDebugInfo = false; }
1491 
1493  return CGM.getCodeGenOpts().OptimizationLevel == 0;
1494  }
1495 
1496  const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
1497 
1498  /// Returns a pointer to the function's exception object and selector slot,
1499  /// which is assigned in every landing pad.
1500  Address getExceptionSlot();
1501  Address getEHSelectorSlot();
1502 
1503  /// Returns the contents of the function's exception object and selector
1504  /// slots.
1505  llvm::Value *getExceptionFromSlot();
1506  llvm::Value *getSelectorFromSlot();
1507 
1508  Address getNormalCleanupDestSlot();
1509 
1510  llvm::BasicBlock *getUnreachableBlock() {
1511  if (!UnreachableBlock) {
1512  UnreachableBlock = createBasicBlock("unreachable");
1513  new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1514  }
1515  return UnreachableBlock;
1516  }
1517 
1518  llvm::BasicBlock *getInvokeDest() {
1519  if (!EHStack.requiresLandingPad()) return nullptr;
1520  return getInvokeDestImpl();
1521  }
1522 
1523  bool currentFunctionUsesSEHTry() const { return CurSEHParent != nullptr; }
1524 
1525  const TargetInfo &getTarget() const { return Target; }
1526  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1528  return CGM.getTargetCodeGenInfo();
1529  }
1530 
1531  //===--------------------------------------------------------------------===//
1532  // Cleanups
1533  //===--------------------------------------------------------------------===//
1534 
1535  typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
1536 
1537  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1538  Address arrayEndPointer,
1539  QualType elementType,
1540  CharUnits elementAlignment,
1541  Destroyer *destroyer);
1542  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1543  llvm::Value *arrayEnd,
1544  QualType elementType,
1545  CharUnits elementAlignment,
1546  Destroyer *destroyer);
1547 
1548  void pushDestroy(QualType::DestructionKind dtorKind,
1549  Address addr, QualType type);
1550  void pushEHDestroy(QualType::DestructionKind dtorKind,
1551  Address addr, QualType type);
1552  void pushDestroy(CleanupKind kind, Address addr, QualType type,
1553  Destroyer *destroyer, bool useEHCleanupForArray);
1554  void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
1555  QualType type, Destroyer *destroyer,
1556  bool useEHCleanupForArray);
1557  void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1558  llvm::Value *CompletePtr,
1559  QualType ElementType);
1560  void pushStackRestore(CleanupKind kind, Address SPMem);
1561  void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
1562  bool useEHCleanupForArray);
1563  llvm::Function *generateDestroyHelper(Address addr, QualType type,
1564  Destroyer *destroyer,
1565  bool useEHCleanupForArray,
1566  const VarDecl *VD);
1567  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1568  QualType elementType, CharUnits elementAlign,
1569  Destroyer *destroyer,
1570  bool checkZeroLength, bool useEHCleanup);
1571 
1572  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1573 
1574  /// Determines whether an EH cleanup is required to destroy a type
1575  /// with the given destruction kind.
1577  switch (kind) {
1578  case QualType::DK_none:
1579  return false;
1580  case QualType::DK_cxx_destructor:
1581  case QualType::DK_objc_weak_lifetime:
1582  case QualType::DK_nontrivial_c_struct:
1583  return getLangOpts().Exceptions;
1584  case QualType::DK_objc_strong_lifetime:
1585  return getLangOpts().Exceptions &&
1586  CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1587  }
1588  llvm_unreachable("bad destruction kind");
1589  }
1590 
1592  return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1593  }
1594 
1595  //===--------------------------------------------------------------------===//
1596  // Objective-C
1597  //===--------------------------------------------------------------------===//
1598 
1599  void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1600 
1601  void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
1602 
1603  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1604  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1605  const ObjCPropertyImplDecl *PID);
1606  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1607  const ObjCPropertyImplDecl *propImpl,
1608  const ObjCMethodDecl *GetterMothodDecl,
1609  llvm::Constant *AtomicHelperFn);
1610 
1611  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1612  ObjCMethodDecl *MD, bool ctor);
1613 
1614  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1615  /// for the given property.
1616  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1617  const ObjCPropertyImplDecl *PID);
1618  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1619  const ObjCPropertyImplDecl *propImpl,
1620  llvm::Constant *AtomicHelperFn);
1621 
1622  //===--------------------------------------------------------------------===//
1623  // Block Bits
1624  //===--------------------------------------------------------------------===//
1625 
1626  /// Emit block literal.
1627  /// \return an LLVM value which is a pointer to a struct which contains
1628  /// information about the block, including the block invoke function, the
1629  /// captured variables, etc.
1630  llvm::Value *EmitBlockLiteral(const BlockExpr *);
1631  static void destroyBlockInfos(CGBlockInfo *info);
1632 
1633  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1634  const CGBlockInfo &Info,
1635  const DeclMapTy &ldm,
1636  bool IsLambdaConversionToBlock,
1637  bool BuildGlobalBlock);
1638 
1639  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1640  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1641  llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1642  const ObjCPropertyImplDecl *PID);
1643  llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1644  const ObjCPropertyImplDecl *PID);
1645  llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1646 
1647  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1648 
1649  class AutoVarEmission;
1650 
1651  void emitByrefStructureInit(const AutoVarEmission &emission);
1652  void enterByrefCleanup(const AutoVarEmission &emission);
1653 
1654  void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
1655  llvm::Value *ptr);
1656 
1657  Address LoadBlockStruct();
1658  Address GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1659 
1660  /// BuildBlockByrefAddress - Computes the location of the
1661  /// data in a variable which is declared as __block.
1662  Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
1663  bool followForward = true);
1664  Address emitBlockByrefAddress(Address baseAddr,
1665  const BlockByrefInfo &info,
1666  bool followForward,
1667  const llvm::Twine &name);
1668 
1669  const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
1670 
1671  QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args);
1672 
1673  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1674  const CGFunctionInfo &FnInfo);
1675  /// Emit code for the start of a function.
1676  /// \param Loc The location to be associated with the function.
1677  /// \param StartLoc The location of the function body.
1678  void StartFunction(GlobalDecl GD,
1679  QualType RetTy,
1680  llvm::Function *Fn,
1681  const CGFunctionInfo &FnInfo,
1682  const FunctionArgList &Args,
1684  SourceLocation StartLoc = SourceLocation());
1685 
1686  static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);
1687 
1688  void EmitConstructorBody(FunctionArgList &Args);
1689  void EmitDestructorBody(FunctionArgList &Args);
1690  void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
1691  void EmitFunctionBody(FunctionArgList &Args, const Stmt *Body);
1692  void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
1693 
1694  void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
1695  CallArgList &CallArgs);
1696  void EmitLambdaBlockInvokeBody();
1697  void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1698  void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
1699  void EmitAsanPrologueOrEpilogue(bool Prologue);
1700 
1701  /// Emit the unified return block, trying to avoid its emission when
1702  /// possible.
1703  /// \return The debug location of the user written return statement if the
1704  /// return block is is avoided.
1705  llvm::DebugLoc EmitReturnBlock();
1706 
1707  /// FinishFunction - Complete IR generation of the current function. It is
1708  /// legal to call this function even if there is no current insertion point.
1709  void FinishFunction(SourceLocation EndLoc=SourceLocation());
1710 
1711  void StartThunk(llvm::Function *Fn, GlobalDecl GD,
1712  const CGFunctionInfo &FnInfo, bool IsUnprototyped);
1713 
1714  void EmitCallAndReturnForThunk(llvm::Constant *Callee, const ThunkInfo *Thunk,
1715  bool IsUnprototyped);
1716 
1717  void FinishThunk();
1718 
1719  /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
1720  void EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr,
1721  llvm::Value *Callee);
1722 
1723  /// Generate a thunk for the given method.
1724  void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1725  GlobalDecl GD, const ThunkInfo &Thunk,
1726  bool IsUnprototyped);
1727 
1728  llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
1729  const CGFunctionInfo &FnInfo,
1730  GlobalDecl GD, const ThunkInfo &Thunk);
1731 
1732  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1733  FunctionArgList &Args);
1734 
1735  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
1736 
1737  /// Struct with all information about dynamic [sub]class needed to set vptr.
1738  struct VPtr {
1743  };
1744 
1745  /// Initialize the vtable pointer of the given subobject.
1746  void InitializeVTablePointer(const VPtr &vptr);
1747 
1749 
1750  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1751  VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);
1752 
1753  void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
1754  CharUnits OffsetFromNearestVBase,
1755  bool BaseIsNonVirtualPrimaryBase,
1756  const CXXRecordDecl *VTableClass,
1757  VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
1758 
1759  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1760 
1761  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1762  /// to by This.
1763  llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy,
1764  const CXXRecordDecl *VTableClass);
1765 
1774  };
1775 
1776  /// Derived is the presumed address of an object of type T after a
1777  /// cast. If T is a polymorphic class type, emit a check that the virtual
1778  /// table for Derived belongs to a class derived from T.
1779  void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
1780  bool MayBeNull, CFITypeCheckKind TCK,
1781  SourceLocation Loc);
1782 
1783  /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
1784  /// If vptr CFI is enabled, emit a check that VTable is valid.
1785  void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
1786  CFITypeCheckKind TCK, SourceLocation Loc);
1787 
1788  /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
1789  /// RD using llvm.type.test.
1790  void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
1791  CFITypeCheckKind TCK, SourceLocation Loc);
1792 
1793  /// If whole-program virtual table optimization is enabled, emit an assumption
1794  /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
1795  /// enabled, emit a check that VTable is a member of RD's type identifier.
1796  void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
1797  llvm::Value *VTable, SourceLocation Loc);
1798 
1799  /// Returns whether we should perform a type checked load when loading a
1800  /// virtual function for virtual calls to members of RD. This is generally
1801  /// true when both vcall CFI and whole-program-vtables are enabled.
1802  bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
1803 
1804  /// Emit a type checked load from the given vtable.
1805  llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable,
1806  uint64_t VTableByteOffset);
1807 
1808  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1809  /// given phase of destruction for a destructor. The end result
1810  /// should call destructors on members and base classes in reverse
1811  /// order of their construction.
1812  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1813 
1814  /// ShouldInstrumentFunction - Return true if the current function should be
1815  /// instrumented with __cyg_profile_func_* calls
1816  bool ShouldInstrumentFunction();
1817 
1818  /// ShouldXRayInstrument - Return true if the current function should be
1819  /// instrumented with XRay nop sleds.
1820  bool ShouldXRayInstrumentFunction() const;
1821 
1822  /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
1823  /// XRay custom event handling calls.
1824  bool AlwaysEmitXRayCustomEvents() const;
1825 
1826  /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
1827  /// XRay typed event handling calls.
1828  bool AlwaysEmitXRayTypedEvents() const;
1829 
1830  /// Encode an address into a form suitable for use in a function prologue.
1831  llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F,
1832  llvm::Constant *Addr);
1833 
1834  /// Decode an address used in a function prologue, encoded by \c
1835  /// EncodeAddrForUseInPrologue.
1836  llvm::Value *DecodeAddrUsedInPrologue(llvm::Value *F,
1837  llvm::Value *EncodedAddr);
1838 
1839  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1840  /// arguments for the given function. This is also responsible for naming the
1841  /// LLVM function arguments.
1842  void EmitFunctionProlog(const CGFunctionInfo &FI,
1843  llvm::Function *Fn,
1844  const FunctionArgList &Args);
1845 
1846  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1847  /// given temporary.
1848  void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
1849  SourceLocation EndLoc);
1850 
1851  /// Emit a test that checks if the return value \p RV is nonnull.
1852  void EmitReturnValueCheck(llvm::Value *RV);
1853 
1854  /// EmitStartEHSpec - Emit the start of the exception spec.
1855  void EmitStartEHSpec(const Decl *D);
1856 
1857  /// EmitEndEHSpec - Emit the end of the exception spec.
1858  void EmitEndEHSpec(const Decl *D);
1859 
1860  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1861  llvm::BasicBlock *getTerminateLandingPad();
1862 
1863  /// getTerminateLandingPad - Return a cleanup funclet that just calls
1864  /// terminate.
1865  llvm::BasicBlock *getTerminateFunclet();
1866 
1867  /// getTerminateHandler - Return a handler (not a landing pad, just
1868  /// a catch handler) that just calls terminate. This is used when
1869  /// a terminate scope encloses a try.
1870  llvm::BasicBlock *getTerminateHandler();
1871 
1872  llvm::Type *ConvertTypeForMem(QualType T);
1873  llvm::Type *ConvertType(QualType T);
1874  llvm::Type *ConvertType(const TypeDecl *T) {
1875  return ConvertType(getContext().getTypeDeclType(T));
1876  }
1877 
1878  /// LoadObjCSelf - Load the value of self. This function is only valid while
1879  /// generating code for an Objective-C method.
1880  llvm::Value *LoadObjCSelf();
1881 
1882  /// TypeOfSelfObject - Return type of object that this self represents.
1883  QualType TypeOfSelfObject();
1884 
1885  /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
1886  static TypeEvaluationKind getEvaluationKind(QualType T);
1887 
1889  return getEvaluationKind(T) == TEK_Scalar;
1890  }
1891 
1893  return getEvaluationKind(T) == TEK_Aggregate;
1894  }
1895 
1896  /// createBasicBlock - Create an LLVM basic block.
1897  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
1898  llvm::Function *parent = nullptr,
1899  llvm::BasicBlock *before = nullptr) {
1900  return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
1901  }
1902 
1903  /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
1904  /// label maps to.
1905  JumpDest getJumpDestForLabel(const LabelDecl *S);
1906 
1907  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
1908  /// another basic block, simplify it. This assumes that no other code could
1909  /// potentially reference the basic block.
1910  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
1911 
1912  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
1913  /// adding a fall-through branch from the current insert block if
1914  /// necessary. It is legal to call this function even if there is no current
1915  /// insertion point.
1916  ///
1917  /// IsFinished - If true, indicates that the caller has finished emitting
1918  /// branches to the given block and does not expect to emit code into it. This
1919  /// means the block can be ignored if it is unreachable.
1920  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
1921 
1922  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
1923  /// near its uses, and leave the insertion point in it.
1924  void EmitBlockAfterUses(llvm::BasicBlock *BB);
1925 
1926  /// EmitBranch - Emit a branch to the specified basic block from the current
1927  /// insert block, taking care to avoid creation of branches from dummy
1928  /// blocks. It is legal to call this function even if there is no current
1929  /// insertion point.
1930  ///
1931  /// This function clears the current insertion point. The caller should follow
1932  /// calls to this function with calls to Emit*Block prior to generation new
1933  /// code.
1934  void EmitBranch(llvm::BasicBlock *Block);
1935 
1936  /// HaveInsertPoint - True if an insertion point is defined. If not, this
1937  /// indicates that the current code being emitted is unreachable.
1938  bool HaveInsertPoint() const {
1939  return Builder.GetInsertBlock() != nullptr;
1940  }
1941 
1942  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1943  /// emitted IR has a place to go. Note that by definition, if this function
1944  /// creates a block then that block is unreachable; callers may do better to
1945  /// detect when no insertion point is defined and simply skip IR generation.
1947  if (!HaveInsertPoint())
1948  EmitBlock(createBasicBlock());
1949  }
1950 
1951  /// ErrorUnsupported - Print out an error that codegen doesn't support the
1952  /// specified stmt yet.
1953  void ErrorUnsupported(const Stmt *S, const char *Type);
1954 
1955  //===--------------------------------------------------------------------===//
1956  // Helpers
1957  //===--------------------------------------------------------------------===//
1958 
1961  return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
1962  CGM.getTBAAAccessInfo(T));
1963  }
1964 
1966  TBAAAccessInfo TBAAInfo) {
1967  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
1968  }
1969 
1972  return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
1973  LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
1974  }
1975 
1977  LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
1978  return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
1979  BaseInfo, TBAAInfo);
1980  }
1981 
1982  LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
1983  LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
1984  CharUnits getNaturalTypeAlignment(QualType T,
1985  LValueBaseInfo *BaseInfo = nullptr,
1986  TBAAAccessInfo *TBAAInfo = nullptr,
1987  bool forPointeeType = false);
1988  CharUnits getNaturalPointeeTypeAlignment(QualType T,
1989  LValueBaseInfo *BaseInfo = nullptr,
1990  TBAAAccessInfo *TBAAInfo = nullptr);
1991 
1992  Address EmitLoadOfReference(LValue RefLVal,
1993  LValueBaseInfo *PointeeBaseInfo = nullptr,
1994  TBAAAccessInfo *PointeeTBAAInfo = nullptr);
1995  LValue EmitLoadOfReferenceLValue(LValue RefLVal);
1997  AlignmentSource Source =
1999  LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2000  CGM.getTBAAAccessInfo(RefTy));
2001  return EmitLoadOfReferenceLValue(RefLVal);
2002  }
2003 
2004  Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
2005  LValueBaseInfo *BaseInfo = nullptr,
2006  TBAAAccessInfo *TBAAInfo = nullptr);
2007  LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
2008 
2009  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2010  /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2011  /// insertion point of the builder. The caller is responsible for setting an
2012  /// appropriate alignment on
2013  /// the alloca.
2014  ///
2015  /// \p ArraySize is the number of array elements to be allocated if it
2016  /// is not nullptr.
2017  ///
2018  /// LangAS::Default is the address space of pointers to local variables and
2019  /// temporaries, as exposed in the source language. In certain
2020  /// configurations, this is not the same as the alloca address space, and a
2021  /// cast is needed to lift the pointer from the alloca AS into
2022  /// LangAS::Default. This can happen when the target uses a restricted
2023  /// address space for the stack but the source language requires
2024  /// LangAS::Default to be a generic address space. The latter condition is
2025  /// common for most programming languages; OpenCL is an exception in that
2026  /// LangAS::Default is the private address space, which naturally maps
2027  /// to the stack.
2028  ///
2029  /// Because the address of a temporary is often exposed to the program in
2030  /// various ways, this function will perform the cast. The original alloca
2031  /// instruction is returned through \p Alloca if it is not nullptr.
2032  ///
2033  /// The cast is not performaed in CreateTempAllocaWithoutCast. This is
2034  /// more efficient if the caller knows that the address will not be exposed.
2035  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2036  llvm::Value *ArraySize = nullptr);
2037  Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
2038  const Twine &Name = "tmp",
2039  llvm::Value *ArraySize = nullptr,
2040  Address *Alloca = nullptr);
2041  Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
2042  const Twine &Name = "tmp",
2043  llvm::Value *ArraySize = nullptr);
2044 
2045  /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2046  /// default ABI alignment of the given LLVM type.
2047  ///
2048  /// IMPORTANT NOTE: This is *not* generally the right alignment for
2049  /// any given AST type that happens to have been lowered to the
2050  /// given IR type. This should only ever be used for function-local,
2051  /// IR-driven manipulations like saving and restoring a value. Do
2052  /// not hand this address off to arbitrary IRGen routines, and especially
2053  /// do not pass it as an argument to a function that might expect a
2054  /// properly ABI-aligned value.
2055  Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
2056  const Twine &Name = "tmp");
2057 
2058  /// InitTempAlloca - Provide an initial value for the given alloca which
2059  /// will be observable at all locations in the function.
2060  ///
2061  /// The address should be something that was returned from one of
2062  /// the CreateTempAlloca or CreateMemTemp routines, and the
2063  /// initializer must be valid in the entry block (i.e. it must
2064  /// either be a constant or an argument value).
2065  void InitTempAlloca(Address Alloca, llvm::Value *Value);
2066 
2067  /// CreateIRTemp - Create a temporary IR object of the given type, with
2068  /// appropriate alignment. This routine should only be used when an temporary
2069  /// value needs to be stored into an alloca (for example, to avoid explicit
2070  /// PHI construction), but the type is the IR type, not the type appropriate
2071  /// for storing in memory.
2072  ///
2073  /// That is, this is exactly equivalent to CreateMemTemp, but calling
2074  /// ConvertType instead of ConvertTypeForMem.
2075  Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
2076 
2077  /// CreateMemTemp - Create a temporary memory object of the given type, with
2078  /// appropriate alignmen and cast it to the default address space. Returns
2079  /// the original alloca instruction by \p Alloca if it is not nullptr.
2080  Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
2081  Address *Alloca = nullptr);
2082  Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
2083  Address *Alloca = nullptr);
2084 
2085  /// CreateMemTemp - Create a temporary memory object of the given type, with
2086  /// appropriate alignmen without casting it to the default address space.
2087  Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2088  Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
2089  const Twine &Name = "tmp");
2090 
2091  /// CreateAggTemp - Create a temporary memory object for the given
2092  /// aggregate type.
2093  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
2094  return AggValueSlot::forAddr(CreateMemTemp(T, Name),
2095  T.getQualifiers(),
2096  AggValueSlot::IsNotDestructed,
2097  AggValueSlot::DoesNotNeedGCBarriers,
2098  AggValueSlot::IsNotAliased,
2099  AggValueSlot::DoesNotOverlap);
2100  }
2101 
2102  /// Emit a cast to void* in the appropriate address space.
2103  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
2104 
2105  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2106  /// expression and compare the result against zero, returning an Int1Ty value.
2107  llvm::Value *EvaluateExprAsBool(const Expr *E);
2108 
2109  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2110  void EmitIgnoredExpr(const Expr *E);
2111 
2112  /// EmitAnyExpr - Emit code to compute the specified expression which can have
2113  /// any type. The result is returned as an RValue struct. If this is an
2114  /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2115  /// the result should be returned.
2116  ///
2117  /// \param ignoreResult True if the resulting value isn't used.
2118  RValue EmitAnyExpr(const Expr *E,
2119  AggValueSlot aggSlot = AggValueSlot::ignored(),
2120  bool ignoreResult = false);
2121 
2122  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2123  // or the value of the expression, depending on how va_list is defined.
2124  Address EmitVAListRef(const Expr *E);
2125 
2126  /// Emit a "reference" to a __builtin_ms_va_list; this is
2127  /// always the value of the expression, because a __builtin_ms_va_list is a
2128  /// pointer to a char.
2129  Address EmitMSVAListRef(const Expr *E);
2130 
2131  /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
2132  /// always be accessible even if no aggregate location is provided.
2133  RValue EmitAnyExprToTemp(const Expr *E);
2134 
2135  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2136  /// arbitrary expression into the given memory location.
2137  void EmitAnyExprToMem(const Expr *E, Address Location,
2138  Qualifiers Quals, bool IsInitializer);
2139 
2140  void EmitAnyExprToExn(const Expr *E, Address Addr);
2141 
2142  /// EmitExprAsInit - Emits the code necessary to initialize a
2143  /// location in memory with the given initializer.
2144  void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2145  bool capturedByInit);
2146 
2147  /// hasVolatileMember - returns true if aggregate type has a volatile
2148  /// member.
2150  if (const RecordType *RT = T->getAs<RecordType>()) {
2151  const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
2152  return RD->hasVolatileMember();
2153  }
2154  return false;
2155  }
2156 
2157  /// Determine whether a return value slot may overlap some other object.
2159  // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2160  // class subobjects. These cases may need to be revisited depending on the
2161  // resolution of the relevant core issue.
2162  return AggValueSlot::DoesNotOverlap;
2163  }
2164 
2165  /// Determine whether a field initialization may overlap some other object.
2167  // FIXME: These cases can result in overlap as a result of P0840R0's
2168  // [[no_unique_address]] attribute. We can still infer NoOverlap in the
2169  // presence of that attribute if the field is within the nvsize of its
2170  // containing class, because non-virtual subobjects are initialized in
2171  // address order.
2172  return AggValueSlot::DoesNotOverlap;
2173  }
2174 
2175  /// Determine whether a base class initialization may overlap some other
2176  /// object.
2177  AggValueSlot::Overlap_t overlapForBaseInit(const CXXRecordDecl *RD,
2178  const CXXRecordDecl *BaseRD,
2179  bool IsVirtual);
2180 
2181  /// Emit an aggregate assignment.
2182  void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
2183  bool IsVolatile = hasVolatileMember(EltTy);
2184  EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
2185  }
2186 
2188  AggValueSlot::Overlap_t MayOverlap) {
2189  EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
2190  }
2191 
2192  /// EmitAggregateCopy - Emit an aggregate copy.
2193  ///
2194  /// \param isVolatile \c true iff either the source or the destination is
2195  /// volatile.
2196  /// \param MayOverlap Whether the tail padding of the destination might be
2197  /// occupied by some other object. More efficient code can often be
2198  /// generated if not.
2199  void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
2200  AggValueSlot::Overlap_t MayOverlap,
2201  bool isVolatile = false);
2202 
2203  /// GetAddrOfLocalVar - Return the address of a local variable.
2205  auto it = LocalDeclMap.find(VD);
2206  assert(it != LocalDeclMap.end() &&
2207  "Invalid argument to GetAddrOfLocalVar(), no decl!");
2208  return it->second;
2209  }
2210 
2211  /// Given an opaque value expression, return its LValue mapping if it exists,
2212  /// otherwise create one.
2213  LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
2214 
2215  /// Given an opaque value expression, return its RValue mapping if it exists,
2216  /// otherwise create one.
2217  RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
2218 
2219  /// Get the index of the current ArrayInitLoopExpr, if any.
2220  llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
2221 
2222  /// getAccessedFieldNo - Given an encoded value and a result number, return
2223  /// the input field number being accessed.
2224  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
2225 
2226  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
2227  llvm::BasicBlock *GetIndirectGotoBlock();
2228 
2229  /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
2230  static bool IsWrappedCXXThis(const Expr *E);
2231 
2232  /// EmitNullInitialization - Generate code to set a value of the given type to
2233  /// null, If the type contains data member pointers, they will be initialized
2234  /// to -1 in accordance with the Itanium C++ ABI.
2235  void EmitNullInitialization(Address DestPtr, QualType Ty);
2236 
2237  /// Emits a call to an LLVM variable-argument intrinsic, either
2238  /// \c llvm.va_start or \c llvm.va_end.
2239  /// \param ArgValue A reference to the \c va_list as emitted by either
2240  /// \c EmitVAListRef or \c EmitMSVAListRef.
2241  /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
2242  /// calls \c llvm.va_end.
2243  llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
2244 
2245  /// Generate code to get an argument from the passed in pointer
2246  /// and update it accordingly.
2247  /// \param VE The \c VAArgExpr for which to generate code.
2248  /// \param VAListAddr Receives a reference to the \c va_list as emitted by
2249  /// either \c EmitVAListRef or \c EmitMSVAListRef.
2250  /// \returns A pointer to the argument.
2251  // FIXME: We should be able to get rid of this method and use the va_arg
2252  // instruction in LLVM instead once it works well enough.
2253  Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr);
2254 
2255  /// emitArrayLength - Compute the length of an array, even if it's a
2256  /// VLA, and drill down to the base element type.
2257  llvm::Value *emitArrayLength(const ArrayType *arrayType,
2258  QualType &baseType,
2259  Address &addr);
2260 
2261  /// EmitVLASize - Capture all the sizes for the VLA expressions in
2262  /// the given variably-modified type and store them in the VLASizeMap.
2263  ///
2264  /// This function can be called with a null (unreachable) insert point.
2265  void EmitVariablyModifiedType(QualType Ty);
2266 
2267  struct VlaSizePair {
2270 
2271  VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
2272  };
2273 
2274  /// Return the number of elements for a single dimension
2275  /// for the given array type.
2276  VlaSizePair getVLAElements1D(const VariableArrayType *vla);
2277  VlaSizePair getVLAElements1D(QualType vla);
2278 
2279  /// Returns an LLVM value that corresponds to the size,
2280  /// in non-variably-sized elements, of a variable length array type,
2281  /// plus that largest non-variably-sized element type. Assumes that
2282  /// the type has already been emitted with EmitVariablyModifiedType.
2283  VlaSizePair getVLASize(const VariableArrayType *vla);
2284  VlaSizePair getVLASize(QualType vla);
2285 
2286  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
2287  /// generating code for an C++ member function.
2289  assert(CXXThisValue && "no 'this' value for this function");
2290  return CXXThisValue;
2291  }
2292  Address LoadCXXThisAddress();
2293 
2294  /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors have
2295  /// virtual bases.
2296  // FIXME: Every place that calls LoadCXXVTT is something
2297  // that needs to be abstracted properly.
2299  assert(CXXStructorImplicitParamValue && "no VTT value for this function");
2300  return CXXStructorImplicitParamValue;
2301  }
2302 
2303  /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
2304  /// complete class to the given direct base.
2305  Address
2306  GetAddressOfDirectBaseInCompleteClass(Address Value,
2307  const CXXRecordDecl *Derived,
2308  const CXXRecordDecl *Base,
2309  bool BaseIsVirtual);
2310 
2311  static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
2312 
2313  /// GetAddressOfBaseClass - This function will add the necessary delta to the
2314  /// load of 'this' and returns address of the base class.
2315  Address GetAddressOfBaseClass(Address Value,
2316  const CXXRecordDecl *Derived,
2319  bool NullCheckValue, SourceLocation Loc);
2320 
2321  Address GetAddressOfDerivedClass(Address Value,
2322  const CXXRecordDecl *Derived,
2325  bool NullCheckValue);
2326 
2327  /// GetVTTParameter - Return the VTT parameter that should be passed to a
2328  /// base constructor/destructor with virtual bases.
2329  /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
2330  /// to ItaniumCXXABI.cpp together with all the references to VTT.
2331  llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
2332  bool Delegating);
2333 
2334  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
2335  CXXCtorType CtorType,
2336  const FunctionArgList &Args,
2337  SourceLocation Loc);
2338  // It's important not to confuse this and the previous function. Delegating
2339  // constructors are the C++0x feature. The constructor delegate optimization
2340  // is used to reduce duplication in the base and complete consturctors where
2341  // they are substantially the same.
2342  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
2343  const FunctionArgList &Args);
2344 
2345  /// Emit a call to an inheriting constructor (that is, one that invokes a
2346  /// constructor inherited from a base class) by inlining its definition. This
2347  /// is necessary if the ABI does not support forwarding the arguments to the
2348  /// base class constructor (because they're variadic or similar).
2349  void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor,
2350  CXXCtorType CtorType,
2351  bool ForVirtualBase,
2352  bool Delegating,
2353  CallArgList &Args);
2354 
2355  /// Emit a call to a constructor inherited from a base class, passing the
2356  /// current constructor's arguments along unmodified (without even making
2357  /// a copy).
2358  void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D,
2359  bool ForVirtualBase, Address This,
2360  bool InheritedFromVBase,
2361  const CXXInheritedCtorInitExpr *E);
2362 
2363  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
2364  bool ForVirtualBase, bool Delegating,
2365  Address This, const CXXConstructExpr *E,
2366  AggValueSlot::Overlap_t Overlap);
2367 
2368  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
2369  bool ForVirtualBase, bool Delegating,
2370  Address This, CallArgList &Args,
2371  AggValueSlot::Overlap_t Overlap,
2372  SourceLocation Loc);
2373 
2374  /// Emit assumption load for all bases. Requires to be be called only on
2375  /// most-derived class and not under construction of the object.
2376  void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
2377 
2378  /// Emit assumption that vptr load == global vtable.
2379  void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
2380 
2381  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
2382  Address This, Address Src,
2383  const CXXConstructExpr *E);
2384 
2385  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
2386  const ArrayType *ArrayTy,
2387  Address ArrayPtr,
2388  const CXXConstructExpr *E,
2389  bool ZeroInitialization = false);
2390 
2391  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
2392  llvm::Value *NumElements,
2393  Address ArrayPtr,
2394  const CXXConstructExpr *E,
2395  bool ZeroInitialization = false);
2396 
2397  static Destroyer destroyCXXObject;
2398 
2399  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
2400  bool ForVirtualBase, bool Delegating,
2401  Address This);
2402 
2403  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
2404  llvm::Type *ElementTy, Address NewPtr,
2405  llvm::Value *NumElements,
2406  llvm::Value *AllocSizeWithoutCookie);
2407 
2408  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
2409  Address Ptr);
2410 
2411  llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
2412  void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
2413 
2414  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
2415  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
2416 
2417  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
2418  QualType DeleteTy, llvm::Value *NumElements = nullptr,
2419  CharUnits CookieSize = CharUnits());
2420 
2421  RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
2422  const CallExpr *TheCallExpr, bool IsDelete);
2423 
2424  llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
2425  llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
2426  Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
2427 
2428  /// Situations in which we might emit a check for the suitability of a
2429  /// pointer or glvalue.
2431  /// Checking the operand of a load. Must be suitably sized and aligned.
2433  /// Checking the destination of a store. Must be suitably sized and aligned.
2435  /// Checking the bound value in a reference binding. Must be suitably sized
2436  /// and aligned, but is not required to refer to an object (until the
2437  /// reference is used), per core issue 453.
2439  /// Checking the object expression in a non-static data member access. Must
2440  /// be an object within its lifetime.
2442  /// Checking the 'this' pointer for a call to a non-static member function.
2443  /// Must be an object within its lifetime.
2445  /// Checking the 'this' pointer for a constructor call.
2447  /// Checking the operand of a static_cast to a derived pointer type. Must be
2448  /// null or an object within its lifetime.
2450  /// Checking the operand of a static_cast to a derived reference type. Must
2451  /// be an object within its lifetime.
2453  /// Checking the operand of a cast to a base object. Must be suitably sized
2454  /// and aligned.
2456  /// Checking the operand of a cast to a virtual base object. Must be an
2457  /// object within its lifetime.
2459  /// Checking the value assigned to a _Nonnull pointer. Must not be null.
2461  /// Checking the operand of a dynamic_cast or a typeid expression. Must be
2462  /// null or an object within its lifetime.
2463  TCK_DynamicOperation
2464  };
2465 
2466  /// Determine whether the pointer type check \p TCK permits null pointers.
2467  static bool isNullPointerAllowed(TypeCheckKind TCK);
2468 
2469  /// Determine whether the pointer type check \p TCK requires a vptr check.
2470  static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);
2471 
2472  /// Whether any type-checking sanitizers are enabled. If \c false,
2473  /// calls to EmitTypeCheck can be skipped.
2474  bool sanitizePerformTypeCheck() const;
2475 
2476  /// Emit a check that \p V is the address of storage of the
2477  /// appropriate size and alignment for an object of type \p Type.
2478  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
2479  QualType Type, CharUnits Alignment = CharUnits::Zero(),
2480  SanitizerSet SkippedChecks = SanitizerSet());
2481 
2482  /// Emit a check that \p Base points into an array object, which
2483  /// we can access at index \p Index. \p Accessed should be \c false if we
2484  /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
2485  void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
2486  QualType IndexType, bool Accessed);
2487 
2488  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2489  bool isInc, bool isPre);
2490  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
2491  bool isInc, bool isPre);
2492 
2493  void EmitAlignmentAssumption(llvm::Value *PtrValue, unsigned Alignment,
2494  llvm::Value *OffsetValue = nullptr) {
2495  Builder.CreateAlignmentAssumption(CGM.getDataLayout(), PtrValue, Alignment,
2496  OffsetValue);
2497  }
2498 
2499  /// Converts Location to a DebugLoc, if debug information is enabled.
2500  llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
2501 
2502 
2503  //===--------------------------------------------------------------------===//
2504  // Declaration Emission
2505  //===--------------------------------------------------------------------===//
2506 
2507  /// EmitDecl - Emit a declaration.
2508  ///
2509  /// This function can be called with a null (unreachable) insert point.
2510  void EmitDecl(const Decl &D);
2511 
2512  /// EmitVarDecl - Emit a local variable declaration.
2513  ///
2514  /// This function can be called with a null (unreachable) insert point.
2515  void EmitVarDecl(const VarDecl &D);
2516 
2517  void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2518  bool capturedByInit);
2519 
2520  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
2521  llvm::Value *Address);
2522 
2523  /// Determine whether the given initializer is trivial in the sense
2524  /// that it requires no code to be generated.
2525  bool isTrivialInitializer(const Expr *Init);
2526 
2527  /// EmitAutoVarDecl - Emit an auto variable declaration.
2528  ///
2529  /// This function can be called with a null (unreachable) insert point.
2530  void EmitAutoVarDecl(const VarDecl &D);
2531 
2533  friend class CodeGenFunction;
2534 
2535  const VarDecl *Variable;
2536 
2537  /// The address of the alloca for languages with explicit address space
2538  /// (e.g. OpenCL) or alloca casted to generic pointer for address space
2539  /// agnostic languages (e.g. C++). Invalid if the variable was emitted
2540  /// as a global constant.
2541  Address Addr;
2542 
2543  llvm::Value *NRVOFlag;
2544 
2545  /// True if the variable is a __block variable.
2546  bool IsByRef;
2547 
2548  /// True if the variable is of aggregate type and has a constant
2549  /// initializer.
2550  bool IsConstantAggregate;
2551 
2552  /// Non-null if we should use lifetime annotations.
2553  llvm::Value *SizeForLifetimeMarkers;
2554 
2555  /// Address with original alloca instruction. Invalid if the variable was
2556  /// emitted as a global constant.
2557  Address AllocaAddr;
2558 
2559  struct Invalid {};
2560  AutoVarEmission(Invalid)
2561  : Variable(nullptr), Addr(Address::invalid()),
2562  AllocaAddr(Address::invalid()) {}
2563 
2564  AutoVarEmission(const VarDecl &variable)
2565  : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
2566  IsByRef(false), IsConstantAggregate(false),
2567  SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
2568 
2569  bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
2570 
2571  public:
2572  static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
2573 
2574  bool useLifetimeMarkers() const {
2575  return SizeForLifetimeMarkers != nullptr;
2576  }
2578  assert(useLifetimeMarkers());
2579  return SizeForLifetimeMarkers;
2580  }
2581 
2582  /// Returns the raw, allocated address, which is not necessarily
2583  /// the address of the object itself. It is casted to default
2584  /// address space for address space agnostic languages.
2586  return Addr;
2587  }
2588 
2589  /// Returns the address for the original alloca instruction.
2590  Address getOriginalAllocatedAddress() const { return AllocaAddr; }
2591 
2592  /// Returns the address of the object within this declaration.
2593  /// Note that this does not chase the forwarding pointer for
2594  /// __block decls.
2595  Address getObjectAddress(CodeGenFunction &CGF) const {
2596  if (!IsByRef) return Addr;
2597 
2598  return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
2599  }
2600  };
2601  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
2602  void EmitAutoVarInit(const AutoVarEmission &emission);
2603  void EmitAutoVarCleanups(const AutoVarEmission &emission);
2604  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
2605  QualType::DestructionKind dtorKind);
2606 
2607  /// Emits the alloca and debug information for the size expressions for each
2608  /// dimension of an array. It registers the association of its (1-dimensional)
2609  /// QualTypes and size expression's debug node, so that CGDebugInfo can
2610  /// reference this node when creating the DISubrange object to describe the
2611  /// array types.
2612  void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI,
2613  const VarDecl &D,
2614  bool EmitDebugInfo);
2615 
2616  void EmitStaticVarDecl(const VarDecl &D,
2617  llvm::GlobalValue::LinkageTypes Linkage);
2618 
2619  class ParamValue {
2620  llvm::Value *Value;
2621  unsigned Alignment;
2622  ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
2623  public:
2625  return ParamValue(value, 0);
2626  }
2628  assert(!addr.getAlignment().isZero());
2629  return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
2630  }
2631 
2632  bool isIndirect() const { return Alignment != 0; }
2633  llvm::Value *getAnyValue() const { return Value; }
2634 
2636  assert(!isIndirect());
2637  return Value;
2638  }
2639 
2641  assert(isIndirect());
2642  return Address(Value, CharUnits::fromQuantity(Alignment));
2643  }
2644  };
2645 
2646  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
2647  void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
2648 
2649  /// protectFromPeepholes - Protect a value that we're intending to
2650  /// store to the side, but which will probably be used later, from
2651  /// aggressive peepholing optimizations that might delete it.
2652  ///
2653  /// Pass the result to unprotectFromPeepholes to declare that
2654  /// protection is no longer required.
2655  ///
2656  /// There's no particular reason why this shouldn't apply to
2657  /// l-values, it's just that no existing peepholes work on pointers.
2658  PeepholeProtection protectFromPeepholes(RValue rvalue);
2659  void unprotectFromPeepholes(PeepholeProtection protection);
2660 
2662  llvm::Value *OffsetValue = nullptr) {
2663  Builder.CreateAlignmentAssumption(CGM.getDataLayout(), PtrValue, Alignment,
2664  OffsetValue);
2665  }
2666 
2667  //===--------------------------------------------------------------------===//
2668  // Statement Emission
2669  //===--------------------------------------------------------------------===//
2670 
2671  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
2672  void EmitStopPoint(const Stmt *S);
2673 
2674  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
2675  /// this function even if there is no current insertion point.
2676  ///
2677  /// This function may clear the current insertion point; callers should use
2678  /// EnsureInsertPoint if they wish to subsequently generate code without first
2679  /// calling EmitBlock, EmitBranch, or EmitStmt.
2680  void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = None);
2681 
2682  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
2683  /// necessarily require an insertion point or debug information; typically
2684  /// because the statement amounts to a jump or a container of other
2685  /// statements.
2686  ///
2687  /// \return True if the statement was handled.
2688  bool EmitSimpleStmt(const Stmt *S);
2689 
2690  Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
2691  AggValueSlot AVS = AggValueSlot::ignored());
2692  Address EmitCompoundStmtWithoutScope(const CompoundStmt &S,
2693  bool GetLast = false,
2694  AggValueSlot AVS =
2695  AggValueSlot::ignored());
2696 
2697  /// EmitLabel - Emit the block for the given label. It is legal to call this
2698  /// function even if there is no current insertion point.
2699  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
2700 
2701  void EmitLabelStmt(const LabelStmt &S);
2702  void EmitAttributedStmt(const AttributedStmt &S);
2703  void EmitGotoStmt(const GotoStmt &S);
2704  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
2705  void EmitIfStmt(const IfStmt &S);
2706 
2707  void EmitWhileStmt(const WhileStmt &S,
2708  ArrayRef<const Attr *> Attrs = None);
2709  void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
2710  void EmitForStmt(const ForStmt &S,
2711  ArrayRef<const Attr *> Attrs = None);
2712  void EmitReturnStmt(const ReturnStmt &S);
2713  void EmitDeclStmt(const DeclStmt &S);
2714  void EmitBreakStmt(const BreakStmt &S);
2715  void EmitContinueStmt(const ContinueStmt &S);
2716  void EmitSwitchStmt(const SwitchStmt &S);
2717  void EmitDefaultStmt(const DefaultStmt &S);
2718  void EmitCaseStmt(const CaseStmt &S);
2719  void EmitCaseStmtRange(const CaseStmt &S);
2720  void EmitAsmStmt(const AsmStmt &S);
2721 
2722  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
2723  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
2724  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
2725  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
2726  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
2727 
2728  void EmitCoroutineBody(const CoroutineBodyStmt &S);
2729  void EmitCoreturnStmt(const CoreturnStmt &S);
2730  RValue EmitCoawaitExpr(const CoawaitExpr &E,
2731  AggValueSlot aggSlot = AggValueSlot::ignored(),
2732  bool ignoreResult = false);
2733  LValue EmitCoawaitLValue(const CoawaitExpr *E);
2734  RValue EmitCoyieldExpr(const CoyieldExpr &E,
2735  AggValueSlot aggSlot = AggValueSlot::ignored(),
2736  bool ignoreResult = false);
2737  LValue EmitCoyieldLValue(const CoyieldExpr *E);
2738  RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
2739 
2740  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2741  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2742 
2743  void EmitCXXTryStmt(const CXXTryStmt &S);
2744  void EmitSEHTryStmt(const SEHTryStmt &S);
2745  void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
2746  void EnterSEHTryStmt(const SEHTryStmt &S);
2747  void ExitSEHTryStmt(const SEHTryStmt &S);
2748 
2749  void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
2750  const Stmt *OutlinedStmt);
2751 
2752  llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
2753  const SEHExceptStmt &Except);
2754 
2755  llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
2756  const SEHFinallyStmt &Finally);
2757 
2758  void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
2759  llvm::Value *ParentFP,
2760  llvm::Value *EntryEBP);
2761  llvm::Value *EmitSEHExceptionCode();
2762  llvm::Value *EmitSEHExceptionInfo();
2763  llvm::Value *EmitSEHAbnormalTermination();
2764 
2765  /// Emit simple code for OpenMP directives in Simd-only mode.
2766  void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
2767 
2768  /// Scan the outlined statement for captures from the parent function. For
2769  /// each capture, mark the capture as escaped and emit a call to
2770  /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
2771  void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
2772  bool IsFilter);
2773 
2774  /// Recovers the address of a local in a parent function. ParentVar is the
2775  /// address of the variable used in the immediate parent function. It can
2776  /// either be an alloca or a call to llvm.localrecover if there are nested
2777  /// outlined functions. ParentFP is the frame pointer of the outermost parent
2778  /// frame.
2779  Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
2780  Address ParentVar,
2781  llvm::Value *ParentFP);
2782 
2783  void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
2784  ArrayRef<const Attr *> Attrs = None);
2785 
2786  /// Controls insertion of cancellation exit blocks in worksharing constructs.
2788  CodeGenFunction &CGF;
2789 
2790  public:
2792  bool HasCancel)
2793  : CGF(CGF) {
2794  CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
2795  }
2796  ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
2797  };
2798 
2799  /// Returns calculated size of the specified type.
2800  llvm::Value *getTypeSize(QualType Ty);
2801  LValue InitCapturedStruct(const CapturedStmt &S);
2802  llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
2803  llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
2804  Address GenerateCapturedStmtArgument(const CapturedStmt &S);
2805  llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S);
2806  void GenerateOpenMPCapturedVars(const CapturedStmt &S,
2807  SmallVectorImpl<llvm::Value *> &CapturedVars);
2808  void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
2809  SourceLocation Loc);
2810  /// Perform element by element copying of arrays with type \a
2811  /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
2812  /// generated by \a CopyGen.
2813  ///
2814  /// \param DestAddr Address of the destination array.
2815  /// \param SrcAddr Address of the source array.
2816  /// \param OriginalType Type of destination and source arrays.
2817  /// \param CopyGen Copying procedure that copies value of single array element
2818  /// to another single array element.
2819  void EmitOMPAggregateAssign(
2820  Address DestAddr, Address SrcAddr, QualType OriginalType,
2821  const llvm::function_ref<void(Address, Address)> CopyGen);
2822  /// Emit proper copying of data from one variable to another.
2823  ///
2824  /// \param OriginalType Original type of the copied variables.
2825  /// \param DestAddr Destination address.
2826  /// \param SrcAddr Source address.
2827  /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
2828  /// type of the base array element).
2829  /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
2830  /// the base array element).
2831  /// \param Copy Actual copygin expression for copying data from \a SrcVD to \a
2832  /// DestVD.
2833  void EmitOMPCopy(QualType OriginalType,
2834  Address DestAddr, Address SrcAddr,
2835  const VarDecl *DestVD, const VarDecl *SrcVD,
2836  const Expr *Copy);
2837  /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
2838  /// \a X = \a E \a BO \a E.
2839  ///
2840  /// \param X Value to be updated.
2841  /// \param E Update value.
2842  /// \param BO Binary operation for update operation.
2843  /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
2844  /// expression, false otherwise.
2845  /// \param AO Atomic ordering of the generated atomic instructions.
2846  /// \param CommonGen Code generator for complex expressions that cannot be
2847  /// expressed through atomicrmw instruction.
2848  /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
2849  /// generated, <false, RValue::get(nullptr)> otherwise.
2850  std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
2851  LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
2852  llvm::AtomicOrdering AO, SourceLocation Loc,
2853  const llvm::function_ref<RValue(RValue)> CommonGen);
2854  bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
2855  OMPPrivateScope &PrivateScope);
2856  void EmitOMPPrivateClause(const OMPExecutableDirective &D,
2857  OMPPrivateScope &PrivateScope);
2858  void EmitOMPUseDevicePtrClause(
2859  const OMPClause &C, OMPPrivateScope &PrivateScope,
2860  const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
2861  /// Emit code for copyin clause in \a D directive. The next code is
2862  /// generated at the start of outlined functions for directives:
2863  /// \code
2864  /// threadprivate_var1 = master_threadprivate_var1;
2865  /// operator=(threadprivate_var2, master_threadprivate_var2);
2866  /// ...
2867  /// __kmpc_barrier(&loc, global_tid);
2868  /// \endcode
2869  ///
2870  /// \param D OpenMP directive possibly with 'copyin' clause(s).
2871  /// \returns true if at least one copyin variable is found, false otherwise.
2872  bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
2873  /// Emit initial code for lastprivate variables. If some variable is
2874  /// not also firstprivate, then the default initialization is used. Otherwise
2875  /// initialization of this variable is performed by EmitOMPFirstprivateClause
2876  /// method.
2877  ///
2878  /// \param D Directive that may have 'lastprivate' directives.
2879  /// \param PrivateScope Private scope for capturing lastprivate variables for
2880  /// proper codegen in internal captured statement.
2881  ///
2882  /// \returns true if there is at least one lastprivate variable, false
2883  /// otherwise.
2884  bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
2885  OMPPrivateScope &PrivateScope);
2886  /// Emit final copying of lastprivate values to original variables at
2887  /// the end of the worksharing or simd directive.
2888  ///
2889  /// \param D Directive that has at least one 'lastprivate' directives.
2890  /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
2891  /// it is the last iteration of the loop code in associated directive, or to
2892  /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
2893  void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
2894  bool NoFinals,
2895  llvm::Value *IsLastIterCond = nullptr);
2896  /// Emit initial code for linear clauses.
2897  void EmitOMPLinearClause(const OMPLoopDirective &D,
2898  CodeGenFunction::OMPPrivateScope &PrivateScope);
2899  /// Emit final code for linear clauses.
2900  /// \param CondGen Optional conditional code for final part of codegen for
2901  /// linear clause.
2902  void EmitOMPLinearClauseFinal(
2903  const OMPLoopDirective &D,
2904  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
2905  /// Emit initial code for reduction variables. Creates reduction copies
2906  /// and initializes them with the values according to OpenMP standard.
2907  ///
2908  /// \param D Directive (possibly) with the 'reduction' clause.
2909  /// \param PrivateScope Private scope for capturing reduction variables for
2910  /// proper codegen in internal captured statement.
2911  ///
2912  void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
2913  OMPPrivateScope &PrivateScope);
2914  /// Emit final update of reduction values to original variables at
2915  /// the end of the directive.
2916  ///
2917  /// \param D Directive that has at least one 'reduction' directives.
2918  /// \param ReductionKind The kind of reduction to perform.
2919  void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
2920  const OpenMPDirectiveKind ReductionKind);
2921  /// Emit initial code for linear variables. Creates private copies
2922  /// and initializes them with the values according to OpenMP standard.
2923  ///
2924  /// \param D Directive (possibly) with the 'linear' clause.
2925  /// \return true if at least one linear variable is found that should be
2926  /// initialized with the value of the original variable, false otherwise.
2927  bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
2928 
2929  typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
2930  llvm::Value * /*OutlinedFn*/,
2931  const OMPTaskDataTy & /*Data*/)>
2933  void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
2934  const OpenMPDirectiveKind CapturedRegion,
2935  const RegionCodeGenTy &BodyGen,
2936  const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
2938  Address BasePointersArray = Address::invalid();
2939  Address PointersArray = Address::invalid();
2940  Address SizesArray = Address::invalid();
2941  unsigned NumberOfTargetItems = 0;
2942  explicit OMPTargetDataInfo() = default;
2943  OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
2944  Address SizesArray, unsigned NumberOfTargetItems)
2945  : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
2946  SizesArray(SizesArray), NumberOfTargetItems(NumberOfTargetItems) {}
2947  };
2948  void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
2949  const RegionCodeGenTy &BodyGen,
2950  OMPTargetDataInfo &InputInfo);
2951 
2952  void EmitOMPParallelDirective(const OMPParallelDirective &S);
2953  void EmitOMPSimdDirective(const OMPSimdDirective &S);
2954  void EmitOMPForDirective(const OMPForDirective &S);
2955  void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
2956  void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
2957  void EmitOMPSectionDirective(const OMPSectionDirective &S);
2958  void EmitOMPSingleDirective(const OMPSingleDirective &S);
2959  void EmitOMPMasterDirective(const OMPMasterDirective &S);
2960  void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
2961  void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
2962  void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
2963  void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
2964  void EmitOMPTaskDirective(const OMPTaskDirective &S);
2965  void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
2966  void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
2967  void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
2968  void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
2969  void EmitOMPFlushDirective(const OMPFlushDirective &S);
2970  void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
2971  void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
2972  void EmitOMPTargetDirective(const OMPTargetDirective &S);
2973  void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
2974  void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S);
2975  void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S);
2976  void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S);
2977  void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S);
2978  void
2979  EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S);
2980  void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
2981  void
2982  EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
2983  void EmitOMPCancelDirective(const OMPCancelDirective &S);
2984  void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S);
2985  void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
2986  void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
2987  void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
2988  void EmitOMPDistributeParallelForDirective(
2990  void EmitOMPDistributeParallelForSimdDirective(
2992  void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
2993  void EmitOMPTargetParallelForSimdDirective(
2995  void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
2996  void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
2997  void
2998  EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
2999  void EmitOMPTeamsDistributeParallelForSimdDirective(
3001  void EmitOMPTeamsDistributeParallelForDirective(
3003  void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
3004  void EmitOMPTargetTeamsDistributeDirective(
3006  void EmitOMPTargetTeamsDistributeParallelForDirective(
3008  void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
3010  void EmitOMPTargetTeamsDistributeSimdDirective(
3012 
3013  /// Emit device code for the target directive.
3014  static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
3015  StringRef ParentName,
3016  const OMPTargetDirective &S);
3017  static void
3018  EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3019  const OMPTargetParallelDirective &S);
3020  /// Emit device code for the target parallel for directive.
3021  static void EmitOMPTargetParallelForDeviceFunction(
3022  CodeGenModule &CGM, StringRef ParentName,
3024  /// Emit device code for the target parallel for simd directive.
3025  static void EmitOMPTargetParallelForSimdDeviceFunction(
3026  CodeGenModule &CGM, StringRef ParentName,
3028  /// Emit device code for the target teams directive.
3029  static void
3030  EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3031  const OMPTargetTeamsDirective &S);
3032  /// Emit device code for the target teams distribute directive.
3033  static void EmitOMPTargetTeamsDistributeDeviceFunction(
3034  CodeGenModule &CGM, StringRef ParentName,
3036  /// Emit device code for the target teams distribute simd directive.
3037  static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
3038  CodeGenModule &CGM, StringRef ParentName,
3040  /// Emit device code for the target simd directive.
3041  static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
3042  StringRef ParentName,
3043  const OMPTargetSimdDirective &S);
3044  /// Emit device code for the target teams distribute parallel for simd
3045  /// directive.
3046  static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
3047  CodeGenModule &CGM, StringRef ParentName,
3049 
3050  static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
3051  CodeGenModule &CGM, StringRef ParentName,
3053  /// Emit inner loop of the worksharing/simd construct.
3054  ///
3055  /// \param S Directive, for which the inner loop must be emitted.
3056  /// \param RequiresCleanup true, if directive has some associated private
3057  /// variables.
3058  /// \param LoopCond Bollean condition for loop continuation.
3059  /// \param IncExpr Increment expression for loop control variable.
3060  /// \param BodyGen Generator for the inner body of the inner loop.
3061  /// \param PostIncGen Genrator for post-increment code (required for ordered
3062  /// loop directvies).
3063  void EmitOMPInnerLoop(
3064  const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
3065  const Expr *IncExpr,
3066  const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
3067  const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
3068 
3069  JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
3070  /// Emit initial code for loop counters of loop-based directives.
3071  void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
3072  OMPPrivateScope &LoopScope);
3073 
3074  /// Helper for the OpenMP loop directives.
3075  void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
3076 
3077  /// Emit code for the worksharing loop-based directive.
3078  /// \return true, if this construct has any lastprivate clause, false -
3079  /// otherwise.
3080  bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
3081  const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3082  const CodeGenDispatchBoundsTy &CGDispatchBounds);
3083 
3084  /// Emit code for the distribute loop-based directive.
3085  void EmitOMPDistributeLoop(const OMPLoopDirective &S,
3086  const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
3087 
3088  /// Helpers for the OpenMP loop directives.
3089  void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
3090  void EmitOMPSimdFinal(
3091  const OMPLoopDirective &D,
3092  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3093 
3094  /// Emits the lvalue for the expression with possibly captured variable.
3095  LValue EmitOMPSharedLValue(const Expr *E);
3096 
3097 private:
3098  /// Helpers for blocks.
3099  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
3100 
3101  /// struct with the values to be passed to the OpenMP loop-related functions
3102  struct OMPLoopArguments {
3103  /// loop lower bound
3104  Address LB = Address::invalid();
3105  /// loop upper bound
3106  Address UB = Address::invalid();
3107  /// loop stride
3108  Address ST = Address::invalid();
3109  /// isLastIteration argument for runtime functions
3110  Address IL = Address::invalid();
3111  /// Chunk value generated by sema
3112  llvm::Value *Chunk = nullptr;
3113  /// EnsureUpperBound
3114  Expr *EUB = nullptr;
3115  /// IncrementExpression
3116  Expr *IncExpr = nullptr;
3117  /// Loop initialization
3118  Expr *Init = nullptr;
3119  /// Loop exit condition
3120  Expr *Cond = nullptr;
3121  /// Update of LB after a whole chunk has been executed
3122  Expr *NextLB = nullptr;
3123  /// Update of UB after a whole chunk has been executed
3124  Expr *NextUB = nullptr;
3125  OMPLoopArguments() = default;
3126  OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
3127  llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
3128  Expr *IncExpr = nullptr, Expr *Init = nullptr,
3129  Expr *Cond = nullptr, Expr *NextLB = nullptr,
3130  Expr *NextUB = nullptr)
3131  : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
3132  IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
3133  NextUB(NextUB) {}
3134  };
3135  void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
3136  const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
3137  const OMPLoopArguments &LoopArgs,
3138  const CodeGenLoopTy &CodeGenLoop,
3139  const CodeGenOrderedTy &CodeGenOrdered);
3140  void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
3141  bool IsMonotonic, const OMPLoopDirective &S,
3142  OMPPrivateScope &LoopScope, bool Ordered,
3143  const OMPLoopArguments &LoopArgs,
3144  const CodeGenDispatchBoundsTy &CGDispatchBounds);
3145  void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
3146  const OMPLoopDirective &S,
3147  OMPPrivateScope &LoopScope,
3148  const OMPLoopArguments &LoopArgs,
3149  const CodeGenLoopTy &CodeGenLoopContent);
3150  /// Emit code for sections directive.
3151  void EmitSections(const OMPExecutableDirective &S);
3152 
3153 public:
3154 
3155  //===--------------------------------------------------------------------===//
3156  // LValue Expression Emission
3157  //===--------------------------------------------------------------------===//
3158 
3159  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
3160  RValue GetUndefRValue(QualType Ty);
3161 
3162  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
3163  /// and issue an ErrorUnsupported style diagnostic (using the
3164  /// provided Name).
3165  RValue EmitUnsupportedRValue(const Expr *E,
3166  const char *Name);
3167 
3168  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
3169  /// an ErrorUnsupported style diagnostic (using the provided Name).
3170  LValue EmitUnsupportedLValue(const Expr *E,
3171  const char *Name);
3172 
3173  /// EmitLValue - Emit code to compute a designator that specifies the location
3174  /// of the expression.
3175  ///
3176  /// This can return one of two things: a simple address or a bitfield
3177  /// reference. In either case, the LLVM Value* in the LValue structure is
3178  /// guaranteed to be an LLVM pointer type.
3179  ///
3180  /// If this returns a bitfield reference, nothing about the pointee type of
3181  /// the LLVM value is known: For example, it may not be a pointer to an
3182  /// integer.
3183  ///
3184  /// If this returns a normal address, and if the lvalue's C type is fixed
3185  /// size, this method guarantees that the returned pointer type will point to
3186  /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
3187  /// variable length type, this is not possible.
3188  ///
3189  LValue EmitLValue(const Expr *E);
3190 
3191  /// Same as EmitLValue but additionally we generate checking code to
3192  /// guard against undefined behavior. This is only suitable when we know
3193  /// that the address will be used to access the object.
3194  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
3195 
3196  RValue convertTempToRValue(Address addr, QualType type,
3197  SourceLocation Loc);
3198 
3199  void EmitAtomicInit(Expr *E, LValue lvalue);
3200 
3201  bool LValueIsSuitableForInlineAtomic(LValue Src);
3202 
3203  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
3204  AggValueSlot Slot = AggValueSlot::ignored());
3205 
3206  RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
3207  llvm::AtomicOrdering AO, bool IsVolatile = false,
3208  AggValueSlot slot = AggValueSlot::ignored());
3209 
3210  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
3211 
3212  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
3213  bool IsVolatile, bool isInit);
3214 
3215  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
3216  LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
3217  llvm::AtomicOrdering Success =
3218  llvm::AtomicOrdering::SequentiallyConsistent,
3219  llvm::AtomicOrdering Failure =
3220  llvm::AtomicOrdering::SequentiallyConsistent,
3221  bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
3222 
3223  void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
3224  const llvm::function_ref<RValue(RValue)> &UpdateOp,
3225  bool IsVolatile);
3226 
3227  /// EmitToMemory - Change a scalar value from its value
3228  /// representation to its in-memory representation.
3229  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
3230 
3231  /// EmitFromMemory - Change a scalar value from its memory
3232  /// representation to its value representation.
3233  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
3234 
3235  /// Check if the scalar \p Value is within the valid range for the given
3236  /// type \p Ty.
3237  ///
3238  /// Returns true if a check is needed (even if the range is unknown).
3239  bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
3240  SourceLocation Loc);
3241 
3242  /// EmitLoadOfScalar - Load a scalar value from an address, taking
3243  /// care to appropriately convert from the memory representation to
3244  /// the LLVM value representation.
3245  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
3246  SourceLocation Loc,
3248  bool isNontemporal = false) {
3249  return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
3250  CGM.getTBAAAccessInfo(Ty), isNontemporal);
3251  }
3252 
3253  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
3254  SourceLocation Loc, LValueBaseInfo BaseInfo,
3255  TBAAAccessInfo TBAAInfo,
3256  bool isNontemporal = false);
3257 
3258  /// EmitLoadOfScalar - Load a scalar value from an address, taking
3259  /// care to appropriately convert from the memory representation to
3260  /// the LLVM value representation. The l-value must be a simple
3261  /// l-value.
3262  llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);
3263 
3264  /// EmitStoreOfScalar - Store a scalar value to an address, taking
3265  /// care to appropriately convert from the memory representation to
3266  /// the LLVM value representation.
3268  bool Volatile, QualType Ty,
3270  bool isInit = false, bool isNontemporal = false) {
3271  EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
3272  CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
3273  }
3274 
3275  void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
3276  bool Volatile, QualType Ty,
3277  LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
3278  bool isInit = false, bool isNontemporal = false);
3279 
3280  /// EmitStoreOfScalar - Store a scalar value to an address, taking
3281  /// care to appropriately convert from the memory representation to
3282  /// the LLVM value representation. The l-value must be a simple
3283  /// l-value. The isInit flag indicates whether this is an initialization.
3284  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
3285  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
3286 
3287  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
3288  /// this method emits the address of the lvalue, then loads the result as an
3289  /// rvalue, returning the rvalue.
3290  RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
3291  RValue EmitLoadOfExtVectorElementLValue(LValue V);
3292  RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
3293  RValue EmitLoadOfGlobalRegLValue(LValue LV);
3294 
3295  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
3296  /// lvalue, where both are guaranteed to the have the same type, and that type
3297  /// is 'Ty'.
3298  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
3299  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
3300  void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
3301 
3302  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
3303  /// as EmitStoreThroughLValue.
3304  ///
3305  /// \param Result [out] - If non-null, this will be set to a Value* for the
3306  /// bit-field contents after the store, appropriate for use as the result of
3307  /// an assignment to the bit-field.
3308  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
3309  llvm::Value **Result=nullptr);
3310 
3311  /// Emit an l-value for an assignment (simple or compound) of complex type.
3312  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
3313  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
3314  LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
3315  llvm::Value *&Result);
3316 
3317  // Note: only available for agg return types
3318  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
3319  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
3320  // Note: only available for agg return types
3321  LValue EmitCallExprLValue(const CallExpr *E);
3322  // Note: only available for agg return types
3323  LValue EmitVAArgExprLValue(const VAArgExpr *E);
3324  LValue EmitDeclRefLValue(const DeclRefExpr *E);
3325  LValue EmitStringLiteralLValue(const StringLiteral *E);
3326  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
3327  LValue EmitPredefinedLValue(const PredefinedExpr *E);
3328  LValue EmitUnaryOpLValue(const UnaryOperator *E);
3329  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3330  bool Accessed = false);
3331  LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
3332  bool IsLowerBound = true);
3333  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
3334  LValue EmitMemberExpr(const MemberExpr *E);
3335  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
3336  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
3337  LValue EmitInitListLValue(const InitListExpr *E);
3338  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
3339  LValue EmitCastLValue(const CastExpr *E);
3340  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
3341  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
3342 
3343  Address EmitExtVectorElementLValue(LValue V);
3344 
3345  RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
3346 
3347  Address EmitArrayToPointerDecay(const Expr *Array,
3348  LValueBaseInfo *BaseInfo = nullptr,
3349  TBAAAccessInfo *TBAAInfo = nullptr);
3350 
3352  llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
3353  ConstantEmission(llvm::Constant *C, bool isReference)
3354  : ValueAndIsReference(C, isReference) {}
3355  public:
3357  static ConstantEmission forReference(llvm::Constant *C) {
3358  return ConstantEmission(C, true);
3359  }
3360  static ConstantEmission forValue(llvm::Constant *C) {
3361  return ConstantEmission(C, false);
3362  }
3363 
3364  explicit operator bool() const {
3365  return ValueAndIsReference.getOpaqueValue() != nullptr;
3366  }
3367 
3368  bool isReference() const { return ValueAndIsReference.getInt(); }
3369  LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
3370  assert(isReference());
3371  return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
3372  refExpr->getType());
3373  }
3374 
3375  llvm::Constant *getValue() const {
3376  assert(!isReference());
3377  return ValueAndIsReference.getPointer();
3378  }
3379  };
3380 
3381  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
3382  ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
3383 
3384  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
3385  AggValueSlot slot = AggValueSlot::ignored());
3386  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
3387 
3388  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
3389  const ObjCIvarDecl *Ivar);
3390  LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
3391  LValue EmitLValueForLambdaField(const FieldDecl *Field);
3392 
3393  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
3394  /// if the Field is a reference, this will return the address of the reference
3395  /// and not the address of the value stored in the reference.
3396  LValue EmitLValueForFieldInitialization(LValue Base,
3397  const FieldDecl* Field);
3398 
3399  LValue EmitLValueForIvar(QualType ObjectTy,
3400  llvm::Value* Base, const ObjCIvarDecl *Ivar,
3401  unsigned CVRQualifiers);
3402 
3403  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
3404  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
3405  LValue EmitLambdaLValue(const LambdaExpr *E);
3406  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
3407  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
3408 
3409  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
3410  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
3411  LValue EmitStmtExprLValue(const StmtExpr *E);
3412  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
3413  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
3414  void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
3415 
3416  //===--------------------------------------------------------------------===//
3417  // Scalar Expression Emission
3418  //===--------------------------------------------------------------------===//
3419 
3420  /// EmitCall - Generate a call of the given function, expecting the given
3421  /// result type, and using the given argument list which specifies both the
3422  /// LLVM arguments and the types they were derived from.
3423  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
3424  ReturnValueSlot ReturnValue, const CallArgList &Args,
3425  llvm::Instruction **callOrInvoke, SourceLocation Loc);
3426  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
3427  ReturnValueSlot ReturnValue, const CallArgList &Args,
3428  llvm::Instruction **callOrInvoke = nullptr) {
3429  return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
3430  SourceLocation());
3431  }
3432  RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
3433  ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr);
3434  RValue EmitCallExpr(const CallExpr *E,
3435  ReturnValueSlot ReturnValue = ReturnValueSlot());
3436  RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
3437  CGCallee EmitCallee(const Expr *E);
3438 
3439  void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
3440 
3441  llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
3442  const Twine &name = "");
3443  llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
3445  const Twine &name = "");
3446  llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
3447  const Twine &name = "");
3448  llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
3450  const Twine &name = "");
3451 
3453  getBundlesForFunclet(llvm::Value *Callee);
3454 
3455  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
3457  const Twine &Name = "");
3458  llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
3460  const Twine &name = "");
3461  llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
3462  const Twine &name = "");
3463  void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3464  ArrayRef<llvm::Value*> args);
3465 
3467  NestedNameSpecifier *Qual,
3468  llvm::Type *Ty);
3469 
3470  CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
3471  CXXDtorType Type,
3472  const CXXRecordDecl *RD);
3473 
3474  // These functions emit calls to the special functions of non-trivial C
3475  // structs.
3476  void defaultInitNonTrivialCStructVar(LValue Dst);
3477  void callCStructDefaultConstructor(LValue Dst);
3478  void callCStructDestructor(LValue Dst);
3479  void callCStructCopyConstructor(LValue Dst, LValue Src);
3480  void callCStructMoveConstructor(LValue Dst, LValue Src);
3481  void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
3482  void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
3483 
3484  RValue
3485  EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method,
3486  const CGCallee &Callee,
3487  ReturnValueSlot ReturnValue, llvm::Value *This,
3488  llvm::Value *ImplicitParam,
3489  QualType ImplicitParamTy, const CallExpr *E,
3490  CallArgList *RtlArgs);
3491  RValue EmitCXXDestructorCall(const CXXDestructorDecl *DD,
3492  const CGCallee &Callee,
3493  llvm::Value *This, llvm::Value *ImplicitParam,
3494  QualType ImplicitParamTy, const CallExpr *E,
3495  StructorType Type);
3496  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
3497  ReturnValueSlot ReturnValue);
3498  RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE,
3499  const CXXMethodDecl *MD,
3500  ReturnValueSlot ReturnValue,
3501  bool HasQualifier,
3502  NestedNameSpecifier *Qualifier,
3503  bool IsArrow, const Expr *Base);
3504  // Compute the object pointer.
3505  Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
3506  llvm::Value *memberPtr,
3507  const MemberPointerType *memberPtrType,
3508  LValueBaseInfo *BaseInfo = nullptr,
3509  TBAAAccessInfo *TBAAInfo = nullptr);
3510  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
3511  ReturnValueSlot ReturnValue);
3512 
3513  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
3514  const CXXMethodDecl *MD,
3515  ReturnValueSlot ReturnValue);
3516  RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);
3517 
3518  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
3519  ReturnValueSlot ReturnValue);
3520 
3521  RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
3522  ReturnValueSlot ReturnValue);
3523 
3524  RValue EmitBuiltinExpr(const FunctionDecl *FD,
3525  unsigned BuiltinID, const CallExpr *E,
3526  ReturnValueSlot ReturnValue);
3527 
3528  /// Emit IR for __builtin_os_log_format.
3529  RValue emitBuiltinOSLogFormat(const CallExpr &E);
3530 
3531  llvm::Function *generateBuiltinOSLogHelperFunction(
3532  const analyze_os_log::OSLogBufferLayout &Layout,
3533  CharUnits BufferAlignment);
3534 
3535  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
3536 
3537  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
3538  /// is unhandled by the current target.
3539  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3540 
3541  llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
3542  const llvm::CmpInst::Predicate Fp,
3543  const llvm::CmpInst::Predicate Ip,
3544  const llvm::Twine &Name = "");
3545  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
3546  llvm::Triple::ArchType Arch);
3547 
3548  llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
3549  unsigned LLVMIntrinsic,
3550  unsigned AltLLVMIntrinsic,
3551  const char *NameHint,
3552  unsigned Modifier,
3553  const CallExpr *E,
3555  Address PtrOp0, Address PtrOp1,
3556  llvm::Triple::ArchType Arch);
3557 
3558  llvm::Value *EmitISOVolatileLoad(const CallExpr *E);
3559  llvm::Value *EmitISOVolatileStore(const CallExpr *E);
3560 
3561  llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
3562  unsigned Modifier, llvm::Type *ArgTy,
3563  const CallExpr *E);
3564  llvm::Value *EmitNeonCall(llvm::Function *F,
3566  const char *name,
3567  unsigned shift = 0, bool rightshift = false);
3568  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
3569  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
3570  bool negateForRightShift);
3571  llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
3572  llvm::Type *Ty, bool usgn, const char *name);
3573  llvm::Value *vectorWrapScalar16(llvm::Value *Op);
3574  llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
3575  llvm::Triple::ArchType Arch);
3576 
3577  llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
3578  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3579  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3580  llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3581  llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3582  llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3583  llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
3584  const CallExpr *E);
3585  llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3586 
3587 private:
3588  enum class MSVCIntrin;
3589 
3590 public:
3591  llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
3592 
3593  llvm::Value *EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args);
3594 
3595  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
3596  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
3597  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
3598  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
3599  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
3600  llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
3601  const ObjCMethodDecl *MethodWithObjects);
3602  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
3603  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
3604  ReturnValueSlot Return = ReturnValueSlot());
3605 
3606  /// Retrieves the default cleanup kind for an ARC cleanup.
3607  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
3609  return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
3611  }
3612 
3613  // ARC primitives.
3614  void EmitARCInitWeak(Address addr, llvm::Value *value);
3615  void EmitARCDestroyWeak(Address addr);
3616  llvm::Value *EmitARCLoadWeak(Address addr);
3617  llvm::Value *EmitARCLoadWeakRetained(Address addr);
3618  llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
3619  void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
3620  void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
3621  void EmitARCCopyWeak(Address dst, Address src);
3622  void EmitARCMoveWeak(Address dst, Address src);
3623  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
3624  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
3625  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
3626  bool resultIgnored);
3627  llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
3628  bool resultIgnored);
3629  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
3630  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
3631  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
3632  void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
3633  void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
3634  llvm::Value *EmitARCAutorelease(llvm::Value *value);
3635  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
3636  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
3637  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
3638  llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
3639 
3640  std::pair<LValue,llvm::Value*>
3641  EmitARCStoreAutoreleasing(const BinaryOperator *e);
3642  std::pair<LValue,llvm::Value*>
3643  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
3644  std::pair<LValue,llvm::Value*>
3645  EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);
3646 
3647  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
3648  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
3649  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
3650 
3651  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
3652  llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
3653  bool allowUnsafeClaim);
3654  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
3655  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
3656  llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr);
3657 
3658  void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
3659 
3660  static Destroyer destroyARCStrongImprecise;
3661  static Destroyer destroyARCStrongPrecise;
3662  static Destroyer destroyARCWeak;
3663  static Destroyer emitARCIntrinsicUse;
3664  static Destroyer destroyNonTrivialCStruct;
3665 
3666  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
3667  llvm::Value *EmitObjCAutoreleasePoolPush();
3668  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
3669  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
3670  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
3671 
3672  /// Emits a reference binding to the passed in expression.
3673  RValue EmitReferenceBindingToExpr(const Expr *E);
3674 
3675  //===--------------------------------------------------------------------===//
3676  // Expression Emission
3677  //===--------------------------------------------------------------------===//
3678 
3679  // Expressions are broken into three classes: scalar, complex, aggregate.
3680 
3681  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
3682  /// scalar type, returning the result.
3683  llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
3684 
3685  /// Emit a conversion from the specified type to the specified destination
3686  /// type, both of which are LLVM scalar types.
3687  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
3688  QualType DstTy, SourceLocation Loc);
3689 
3690  /// Emit a conversion from the specified complex type to the specified
3691  /// destination type, where the destination type is an LLVM scalar type.
3692  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
3693  QualType DstTy,
3694  SourceLocation Loc);
3695 
3696  /// EmitAggExpr - Emit the computation of the specified expression
3697  /// of aggregate type. The result is computed into the given slot,
3698  /// which may be null to indicate that the value is not needed.
3699  void EmitAggExpr(const Expr *E, AggValueSlot AS);
3700 
3701  /// EmitAggExprToLValue - Emit the computation of the specified expression of
3702  /// aggregate type into a temporary LValue.
3703  LValue EmitAggExprToLValue(const Expr *E);
3704 
3705  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
3706  /// make sure it survives garbage collection until this point.
3707  void EmitExtendGCLifetime(llvm::Value *object);
3708 
3709  /// EmitComplexExpr - Emit the computation of the specified expression of
3710  /// complex type, returning the result.
3711  ComplexPairTy EmitComplexExpr(const Expr *E,
3712  bool IgnoreReal = false,
3713  bool IgnoreImag = false);
3714 
3715  /// EmitComplexExprIntoLValue - Emit the given expression of complex
3716  /// type and place its result into the specified l-value.
3717  void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
3718 
3719  /// EmitStoreOfComplex - Store a complex number into the specified l-value.
3720  void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
3721 
3722  /// EmitLoadOfComplex - Load a complex number from the specified l-value.
3723  ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
3724 
3725  Address emitAddrOfRealComponent(Address complex, QualType complexType);
3726  Address emitAddrOfImagComponent(Address complex, QualType complexType);
3727 
3728  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
3729  /// global variable that has already been created for it. If the initializer
3730  /// has a different type than GV does, this may free GV and return a different
3731  /// one. Otherwise it just returns GV.
3732  llvm::GlobalVariable *
3733  AddInitializerToStaticVarDecl(const VarDecl &D,
3734  llvm::GlobalVariable *GV);
3735 
3736 
3737  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
3738  /// variable with global storage.
3739  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
3740  bool PerformInit);
3741 
3742  llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::Constant *Dtor,
3743  llvm::Constant *Addr);
3744 
3745  /// Call atexit() with a function that passes the given argument to
3746  /// the given function.
3747  void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::Constant *fn,
3748  llvm::Constant *addr);
3749 
3750  /// Call atexit() with function dtorStub.
3751  void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
3752 
3753  /// Emit code in this function to perform a guarded variable
3754  /// initialization. Guarded initializations are used when it's not
3755  /// possible to prove that an initialization will be done exactly
3756  /// once, e.g. with a static local variable or a static data member
3757  /// of a class template.
3758  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
3759  bool PerformInit);
3760 
3761  enum class GuardKind { VariableGuard, TlsGuard };
3762 
3763  /// Emit a branch to select whether or not to perform guarded initialization.
3764  void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
3765  llvm::BasicBlock *InitBlock,
3766  llvm::BasicBlock *NoInitBlock,
3767  GuardKind Kind, const VarDecl *D);
3768 
3769  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
3770  /// variables.
3771  void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
3772  ArrayRef<llvm::Function *> CXXThreadLocals,
3773  Address Guard = Address::invalid());
3774 
3775  /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
3776  /// variables.
3777  void GenerateCXXGlobalDtorsFunc(
3778  llvm::Function *Fn,
3779  const std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>>
3780  &DtorsAndObjects);
3781 
3782  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
3783  const VarDecl *D,
3784  llvm::GlobalVariable *Addr,
3785  bool PerformInit);
3786 
3787  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
3788 
3789  void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
3790 
3792  if (E->getNumObjects() == 0) return;
3793  enterNonTrivialFullExpression(E);
3794  }
3795  void enterNonTrivialFullExpression(const ExprWithCleanups *E);
3796 
3797  void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
3798 
3799  void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
3800 
3801  RValue EmitAtomicExpr(AtomicExpr *E);
3802 
3803  //===--------------------------------------------------------------------===//
3804  // Annotations Emission
3805  //===--------------------------------------------------------------------===//
3806 
3807  /// Emit an annotation call (intrinsic or builtin).
3808  llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
3809  llvm::Value *AnnotatedVal,
3810  StringRef AnnotationStr,
3811  SourceLocation Location);
3812 
3813  /// Emit local annotations for the local variable V, declared by D.
3814  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
3815 
3816  /// Emit field annotations for the given field & value. Returns the
3817  /// annotation result.
3818  Address EmitFieldAnnotations(const FieldDecl *D, Address V);
3819 
3820  //===--------------------------------------------------------------------===//
3821  // Internal Helpers
3822  //===--------------------------------------------------------------------===//
3823 
3824  /// ContainsLabel - Return true if the statement contains a label in it. If
3825  /// this statement is not executed normally, it not containing a label means
3826  /// that we can just remove the code.
3827  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
3828 
3829  /// containsBreak - Return true if the statement contains a break out of it.
3830  /// If the statement (recursively) contains a switch or loop with a break
3831  /// inside of it, this is fine.
3832  static bool containsBreak(const Stmt *S);
3833 
3834  /// Determine if the given statement might introduce a declaration into the
3835  /// current scope, by being a (possibly-labelled) DeclStmt.
3836  static bool mightAddDeclToScope(const Stmt *S);
3837 
3838  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
3839  /// to a constant, or if it does but contains a label, return false. If it
3840  /// constant folds return true and set the boolean result in Result.
3841  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result,
3842  bool AllowLabels = false);
3843 
3844  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
3845  /// to a constant, or if it does but contains a label, return false. If it
3846  /// constant folds return true and set the folded value.
3847  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
3848  bool AllowLabels = false);
3849 
3850  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
3851  /// if statement) to the specified blocks. Based on the condition, this might
3852  /// try to simplify the codegen of the conditional based on the branch.
3853  /// TrueCount should be the number of times we expect the condition to
3854  /// evaluate to true based on PGO data.
3855  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
3856  llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
3857 
3858  /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
3859  /// nonnull, if \p LHS is marked _Nonnull.
3860  void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
3861 
3862  /// An enumeration which makes it easier to specify whether or not an
3863  /// operation is a subtraction.
3864  enum { NotSubtraction = false, IsSubtraction = true };
3865 
3866  /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
3867  /// detect undefined behavior when the pointer overflow sanitizer is enabled.
3868  /// \p SignedIndices indicates whether any of the GEP indices are signed.
3869  /// \p IsSubtraction indicates whether the expression used to form the GEP
3870  /// is a subtraction.
3871  llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
3872  ArrayRef<llvm::Value *> IdxList,
3873  bool SignedIndices,
3874  bool IsSubtraction,
3875  SourceLocation Loc,
3876  const Twine &Name = "");
3877 
3878  /// Specifies which type of sanitizer check to apply when handling a
3879  /// particular builtin.
3883  };
3884 
3885  /// Emits an argument for a call to a builtin. If the builtin sanitizer is
3886  /// enabled, a runtime check specified by \p Kind is also emitted.
3887  llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
3888 
3889  /// Emit a description of a type in a format suitable for passing to
3890  /// a runtime sanitizer handler.
3891  llvm::Constant *EmitCheckTypeDescriptor(QualType T);
3892 
3893  /// Convert a value into a format suitable for passing to a runtime
3894  /// sanitizer handler.
3895  llvm::Value *EmitCheckValue(llvm::Value *V);
3896 
3897  /// Emit a description of a source location in a format suitable for
3898  /// passing to a runtime sanitizer handler.
3899  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
3900 
3901  /// Create a basic block that will call a handler function in a
3902  /// sanitizer runtime with the provided arguments, and create a conditional
3903  /// branch to it.
3904  void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
3905  SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
3906  ArrayRef<llvm::Value *> DynamicArgs);
3907 
3908  /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
3909  /// if Cond if false.
3910  void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond,
3911  llvm::ConstantInt *TypeId, llvm::Value *Ptr,
3912  ArrayRef<llvm::Constant *> StaticArgs);
3913 
3914  /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
3915  /// checking is enabled. Otherwise, just emit an unreachable instruction.
3916  void EmitUnreachable(SourceLocation Loc);
3917 
3918  /// Create a basic block that will call the trap intrinsic, and emit a
3919  /// conditional branch to it, for the -ftrapv checks.
3920  void EmitTrapCheck(llvm::Value *Checked);
3921 
3922  /// Emit a call to trap or debugtrap and attach function attribute
3923  /// "trap-func-name" if specified.
3924  llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
3925 
3926  /// Emit a stub for the cross-DSO CFI check function.
3927  void EmitCfiCheckStub();
3928 
3929  /// Emit a cross-DSO CFI failure handling function.
3930  void EmitCfiCheckFail();
3931 
3932  /// Create a check for a function parameter that may potentially be
3933  /// declared as non-null.
3934  void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
3935  AbstractCallee AC, unsigned ParmNum);
3936 
3937  /// EmitCallArg - Emit a single call argument.
3938  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
3939 
3940  /// EmitDelegateCallArg - We are performing a delegate call; that
3941  /// is, the current function is delegating to another one. Produce
3942  /// a r-value suitable for passing the given parameter.
3943  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
3944  SourceLocation loc);
3945 
3946  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
3947  /// point operation, expressed as the maximum relative error in ulp.
3948  void SetFPAccuracy(llvm::Value *Val, float Accuracy);
3949 
3950 private:
3951  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
3952  void EmitReturnOfRValue(RValue RV, QualType Ty);
3953 
3954  void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
3955 
3957  DeferredReplacements;
3958 
3959  /// Set the address of a local variable.
3960  void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
3961  assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
3962  LocalDeclMap.insert({VD, Addr});
3963  }
3964 
3965  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
3966  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
3967  ///
3968  /// \param AI - The first function argument of the expansion.
3969  void ExpandTypeFromArgs(QualType Ty, LValue Dst,
3971 
3972  /// ExpandTypeToArgs - Expand an CallArg \arg Arg, with the LLVM type for \arg
3973  /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
3974  /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
3975  void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
3976  SmallVectorImpl<llvm::Value *> &IRCallArgs,
3977  unsigned &IRCallArgPos);
3978 
3979  llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
3980  const Expr *InputExpr, std::string &ConstraintStr);
3981 
3982  llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
3983  LValue InputValue, QualType InputType,
3984  std::string &ConstraintStr,
3985  SourceLocation Loc);
3986 
3987  /// Attempts to statically evaluate the object size of E. If that
3988  /// fails, emits code to figure the size of E out for us. This is
3989  /// pass_object_size aware.
3990  ///
3991  /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
3992  llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
3993  llvm::IntegerType *ResType,
3994  llvm::Value *EmittedE);
3995 
3996  /// Emits the size of E, as required by __builtin_object_size. This
3997  /// function is aware of pass_object_size parameters, and will act accordingly
3998  /// if E is a parameter with the pass_object_size attribute.
3999  llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
4000  llvm::IntegerType *ResType,
4001  llvm::Value *EmittedE);
4002 
4003 public:
4004 #ifndef NDEBUG
4005  // Determine whether the given argument is an Objective-C method
4006  // that may have type parameters in its signature.
4007  static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4008  const DeclContext *dc = method->getDeclContext();
4009  if (const ObjCInterfaceDecl *classDecl= dyn_cast<ObjCInterfaceDecl>(dc)) {
4010  return classDecl->getTypeParamListAsWritten();
4011  }
4012 
4013  if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4014  return catDecl->getTypeParamList();
4015  }
4016 
4017  return false;
4018  }
4019 
4020  template<typename T>
4021  static bool isObjCMethodWithTypeParams(const T *) { return false; }
4022 #endif
4023 
4024  enum class EvaluationOrder {
4025  ///! No language constraints on evaluation order.
4026  Default,
4027  ///! Language semantics require left-to-right evaluation.
4028  ForceLeftToRight,
4029  ///! Language semantics require right-to-left evaluation.
4030  ForceRightToLeft
4031  };
4032 
4033  /// EmitCallArgs - Emit call arguments for a function.
4034  template <typename T>
4035  void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
4036  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4038  unsigned ParamsToSkip = 0,
4039  EvaluationOrder Order = EvaluationOrder::Default) {
4040  SmallVector<QualType, 16> ArgTypes;
4041  CallExpr::const_arg_iterator Arg = ArgRange.begin();
4042 
4043  assert((ParamsToSkip == 0 || CallArgTypeInfo) &&
4044  "Can't skip parameters if type info is not provided");
4045  if (CallArgTypeInfo) {
4046 #ifndef NDEBUG
4047  bool isGenericMethod = isObjCMethodWithTypeParams(CallArgTypeInfo);
4048 #endif
4049 
4050  // First, use the argument types that the type info knows about
4051  for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip,
4052  E = CallArgTypeInfo->param_type_end();
4053  I != E; ++I, ++Arg) {
4054  assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4055  assert((isGenericMethod ||
4056  ((*I)->isVariablyModifiedType() ||
4057  (*I).getNonReferenceType()->isObjCRetainableType() ||
4058  getContext()
4059  .getCanonicalType((*I).getNonReferenceType())
4060  .getTypePtr() ==
4061  getContext()
4062  .getCanonicalType((*Arg)->getType())
4063  .getTypePtr())) &&
4064  "type mismatch in call argument!");
4065  ArgTypes.push_back(*I);
4066  }
4067  }
4068 
4069  // Either we've emitted all the call args, or we have a call to variadic
4070  // function.
4071  assert((Arg == ArgRange.end() || !CallArgTypeInfo ||
4072  CallArgTypeInfo->isVariadic()) &&
4073  "Extra arguments in non-variadic function!");
4074 
4075  // If we still have any arguments, emit them using the type of the argument.
4076  for (auto *A : llvm::make_range(Arg, ArgRange.end()))
4077  ArgTypes.push_back(CallArgTypeInfo ? getVarArgType(A) : A->getType());
4078 
4079  EmitCallArgs(Args, ArgTypes, ArgRange, AC, ParamsToSkip, Order);
4080  }
4081 
4082  void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
4083  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4085  unsigned ParamsToSkip = 0,
4086  EvaluationOrder Order = EvaluationOrder::Default);
4087 
4088  /// EmitPointerWithAlignment - Given an expression with a pointer type,
4089  /// emit the value and compute our best estimate of the alignment of the
4090  /// pointee.
4091  ///
4092  /// \param BaseInfo - If non-null, this will be initialized with
4093  /// information about the source of the alignment and the may-alias
4094  /// attribute. Note that this function will conservatively fall back on
4095  /// the type when it doesn't recognize the expression and may-alias will
4096  /// be set to false.
4097  ///
4098  /// One reasonable way to use this information is when there's a language
4099  /// guarantee that the pointer must be aligned to some stricter value, and
4100  /// we're simply trying to ensure that sufficiently obvious uses of under-
4101  /// aligned objects don't get miscompiled; for example, a placement new
4102  /// into the address of a local variable. In such a case, it's quite
4103  /// reasonable to just ignore the returned alignment when it isn't from an
4104  /// explicit source.
4105  Address EmitPointerWithAlignment(const Expr *Addr,
4106  LValueBaseInfo *BaseInfo = nullptr,
4107  TBAAAccessInfo *TBAAInfo = nullptr);
4108 
4109  /// If \p E references a parameter with pass_object_size info or a constant
4110  /// array size modifier, emit the object size divided by the size of \p EltTy.
4111  /// Otherwise return null.
4112  llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);
4113 
4114  void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
4115 
4117  llvm::Function *Function;
4118  TargetAttr::ParsedTargetAttr ParsedAttribute;
4119  unsigned Priority;
4121  const TargetInfo &TargInfo, llvm::Function *F,
4122  const clang::TargetAttr::ParsedTargetAttr &PT)
4123  : Function(F), ParsedAttribute(PT), Priority(0u) {
4124  for (StringRef Feat : PT.Features)
4125  Priority = std::max(Priority,
4126  TargInfo.multiVersionSortPriority(Feat.substr(1)));
4127 
4128  if (!PT.Architecture.empty())
4129  Priority = std::max(Priority,
4130  TargInfo.multiVersionSortPriority(PT.Architecture));
4131  }
4132 
4133  bool operator>(const TargetMultiVersionResolverOption &Other) const {
4134  return Priority > Other.Priority;
4135  }
4136  };
4137  void EmitTargetMultiVersionResolver(
4138  llvm::Function *Resolver,
4140 
4142  llvm::Function *Function;
4143  // Note: EmitX86CPUSupports only has 32 bits available, so we store the mask
4144  // as 32 bits here. When 64-bit support is added to __builtin_cpu_supports,
4145  // this can be extended to 64 bits.
4146  uint32_t FeatureMask;
4147  CPUDispatchMultiVersionResolverOption(llvm::Function *F, uint64_t Mask)
4148  : Function(F), FeatureMask(static_cast<uint32_t>(Mask)) {}
4150  return FeatureMask > Other.FeatureMask;
4151  }
4152  };
4153  void EmitCPUDispatchMultiVersionResolver(
4154  llvm::Function *Resolver,
4156  static uint32_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
4157 
4158 private:
4159  QualType getVarArgType(const Expr *Arg);
4160 
4161  void EmitDeclMetadata();
4162 
4163  BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
4164  const AutoVarEmission &emission);
4165 
4166  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
4167 
4168  llvm::Value *GetValueForARMHint(unsigned BuiltinID);
4169  llvm::Value *EmitX86CpuIs(const CallExpr *E);
4170  llvm::Value *EmitX86CpuIs(StringRef CPUStr);
4171  llvm::Value *EmitX86CpuSupports(const CallExpr *E);
4172  llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
4173  llvm::Value *EmitX86CpuSupports(uint32_t Mask);
4174  llvm::Value *EmitX86CpuInit();
4175  llvm::Value *
4176  FormResolverCondition(const TargetMultiVersionResolverOption &RO);
4177 };
4178 
4179 /// Helper class with most of the code for saving a value for a
4180 /// conditional expression cleanup.
4182  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
4183 
4184  /// Answer whether the given value needs extra work to be saved.
4185  static bool needsSaving(llvm::Value *value) {
4186  // If it's not an instruction, we don't need to save.
4187  if (!isa<llvm::Instruction>(value)) return false;
4188 
4189  // If it's an instruction in the entry block, we don't need to save.
4190  llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
4191  return (block != &block->getParent()->getEntryBlock());
4192  }
4193 
4194  /// Try to save the given value.
4195  static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
4196  if (!needsSaving(value)) return saved_type(value, false);
4197 
4198  // Otherwise, we need an alloca.
4199  auto align = CharUnits::fromQuantity(
4200  CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
4201  Address alloca =
4202  CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
4203  CGF.Builder.CreateStore(value, alloca);
4204 
4205  return saved_type(alloca.getPointer(), true);
4206  }
4207 
4208  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
4209  // If the value says it wasn't saved, trust that it's still dominating.
4210  if (!value.getInt()) return value.getPointer();
4211 
4212  // Otherwise, it should be an alloca instruction, as set up in save().
4213  auto alloca = cast<llvm::AllocaInst>(value.getPointer());
4214  return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
4215  }
4216 };
4217 
4218 /// A partial specialization of DominatingValue for llvm::Values that
4219 /// might be llvm::Instructions.
4220 template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
4221  typedef T *type;
4222  static type restore(CodeGenFunction &CGF, saved_type value) {
4223  return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
4224  }
4225 };
4226 
4227 /// A specialization of DominatingValue for Address.
4228 template <> struct DominatingValue<Address> {
4229  typedef Address type;
4230 
4231  struct saved_type {
4234  };
4235 
4236  static bool needsSaving(type value) {
4237  return DominatingLLVMValue::needsSaving(value.getPointer());
4238  }
4239  static saved_type save(CodeGenFunction &CGF, type value) {
4240  return { DominatingLLVMValue::save(CGF, value.getPointer()),
4241  value.getAlignment() };
4242  }
4243  static type restore(CodeGenFunction &CGF, saved_type value) {
4244  return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
4245  value.Alignment);
4246  }
4247 };
4248 
4249 /// A specialization of DominatingValue for RValue.
4250 template <> struct DominatingValue<RValue> {
4251  typedef RValue type;
4252  class saved_type {
4253  enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
4254  AggregateAddress, ComplexAddress };
4255 
4256  llvm::Value *Value;
4257  unsigned K : 3;
4258  unsigned Align : 29;
4259  saved_type(llvm::Value *v, Kind k, unsigned a = 0)
4260  : Value(v), K(k), Align(a) {}
4261 
4262  public:
4263  static bool needsSaving(RValue value);
4264  static saved_type save(CodeGenFunction &CGF, RValue value);
4265  RValue restore(CodeGenFunction &CGF);
4266 
4267  // implementations in CGCleanup.cpp
4268  };
4269 
4270  static bool needsSaving(type value) {
4271  return saved_type::needsSaving(value);
4272  }
4273  static saved_type save(CodeGenFunction &CGF, type value) {
4274  return saved_type::save(CGF, value);
4275  }
4276  static type restore(CodeGenFunction &CGF, saved_type value) {
4277  return value.restore(CGF);
4278  }
4279 };
4280 
4281 } // end namespace CodeGen
4282 } // end namespace clang
4283 
4284 #endif
const llvm::DataLayout & getDataLayout() const
A call to an overloaded operator written using operator syntax.
Definition: ExprCXX.h:78
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Definition: CGCall.h:361
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
Optional< uint64_t > getStmtCount(const Stmt *S)
Check if an execution count is known for a given statement.
Definition: CodeGenPGO.h:64
This represents &#39;#pragma omp distribute simd&#39; composite directive.
Definition: StmtOpenMP.h:3212
Information about the layout of a __block variable.
Definition: CGBlocks.h:141
This represents &#39;#pragma omp master&#39; directive.
Definition: StmtOpenMP.h:1395
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
This represents &#39;#pragma omp task&#39; directive.
Definition: StmtOpenMP.h:1735
Represents a function declaration or definition.
Definition: Decl.h:1714
LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2387
Scheduling data for loop-based OpenMP directives.
Definition: OpenMPKinds.h:124
A (possibly-)qualified type.
Definition: Type.h:655
static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF, GlobalDecl GD, llvm::Type *Ty, const CXXRecordDecl *RD)
Definition: CGCXX.cpp:258
const CodeGenOptions & getCodeGenOpts() const
The class detects jumps which bypass local variables declaration: goto L; int a; L: ...
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition: CGExpr.cpp:139
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:126
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
llvm::LLVMContext & getLLVMContext()
DominatorTree GraphTraits specialization so the DominatorTree can be iterable by generic graph iterat...
Definition: Dominators.h:30
FieldConstructionScope(CodeGenFunction &CGF, Address This)
Represents a &#39;co_return&#39; statement in the C++ Coroutines TS.
Definition: StmtCXX.h:432
Stmt - This represents one statement.
Definition: Stmt.h:66
IfStmt - This represents an if/then/else.
Definition: Stmt.h:949
static T * buildByrefHelpers(CodeGenModule &CGM, const BlockByrefInfo &byrefInfo, T &&generator)
Lazily build the copy and dispose helpers for a __block variable with the given information.
Definition: CGBlocks.cpp:2194
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
C Language Family Type Representation.
OpaqueValueMapping(CodeGenFunction &CGF, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it&#39;s the GNU ...
This represents &#39;#pragma omp for simd&#39; directive.
Definition: StmtOpenMP.h:1145
Checking the &#39;this&#39; pointer for a constructor call.
bool hasVolatileMember() const
Definition: Decl.h:3672
bool hasLabelBeenSeenInCurrentScope() const
Return true if a label was seen in the current scope.
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
This represents &#39;#pragma omp teams distribute parallel for&#39; composite directive.
Definition: StmtOpenMP.h:3623
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
static saved_type save(CodeGenFunction &CGF, llvm::Value *value)
Try to save the given value.
static bool classof(const CGCapturedStmtInfo *)
Represents an attribute applied to a statement.
Definition: Stmt.h:897
static Destroyer destroyARCStrongPrecise
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of &#39;this&#39;.
The base class of the type hierarchy.
Definition: Type.h:1428
This represents &#39;#pragma omp target teams distribute&#39; combined directive.
Definition: StmtOpenMP.h:3760
Represents Objective-C&#39;s @throw statement.
Definition: StmtObjC.h:313
CGCapturedStmtInfo(const CapturedStmt &S, CapturedRegionKind K=CR_Default)
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Definition: Stmt.h:2188
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:2662
void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D, llvm::Value *Address)
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1240
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:379
DominatingValue< T >::saved_type saveValueInCond(T value)
CGCapturedStmtInfo(CapturedRegionKind K=CR_Default)
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
const ParmVarDecl * getParamDecl(unsigned I) const
This represents &#39;#pragma omp parallel for&#39; directive.
Definition: StmtOpenMP.h:1516
void emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S, llvm::Value *StepV)
Definition: CodeGenPGO.cpp:886
This represents &#39;#pragma omp target teams distribute parallel for&#39; combined directive.
Definition: StmtOpenMP.h:3828
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2475
Represents a prvalue temporary that is written into memory so that a reference can bind to it...
Definition: ExprCXX.h:4040
static bool needsSaving(llvm::Value *value)
Answer whether the given value needs extra work to be saved.
static type restore(CodeGenFunction &CGF, saved_type value)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv)
Represents a point when we exit a loop.
Definition: ProgramPoint.h:687
const CXXBaseSpecifier *const * path_const_iterator
Definition: Expr.h:2858
This represents &#39;#pragma omp target exit data&#39; directive.
Definition: StmtOpenMP.h:2427
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
Represents a variable declaration or definition.
Definition: Decl.h:812
Address getObjectAddress(CodeGenFunction &CGF) const
Returns the address of the object within this declaration.
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC &#39;id&#39; type.
Definition: ExprObjC.h:1436
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:2715
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const T * getAs() const
Member-template getAs<specific type>&#39;.
Definition: Type.h:6519
uint64_t getProfileCount(const Stmt *S)
Get the profiler&#39;s count for the given statement.
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:54
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:139
void setCurrentProfileCount(uint64_t Count)
Set the profiler&#39;s current count.
llvm::Value * getPointer() const
Definition: Address.h:38
static ConstantEmission forValue(llvm::Constant *C)
capture_iterator capture_begin()
Retrieve an iterator pointing to the first capture.
Definition: Stmt.h:2213
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:986
Represents an expression – generally a full-expression – that introduces cleanups to be run at the ...
Definition: ExprCXX.h:3004
Represents a parameter to a function.
Definition: Decl.h:1533
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have...
Definition: Linkage.h:24
Defines the clang::Expr interface and subclasses for C++ expressions.
The collection of all-type qualifiers we support.
Definition: Type.h:154
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters...
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:858
Represents a struct/union/class.
Definition: Decl.h:3559
llvm::DenseMap< const VarDecl *, FieldDecl * > LambdaCaptureFields
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
const TargetInfo & getTarget() const
An object to manage conditionally-evaluated expressions.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we&#39;re intending to store to the side, but which will prob...
ConditionalCleanup stores the saved form of its parameters, then restores them and performs the clean...
Definition: EHScopeStack.h:198
ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
void setScopeDepth(EHScopeStack::stable_iterator depth)
This represents &#39;#pragma omp parallel&#39; directive.
Definition: StmtOpenMP.h:274
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:150
llvm::SmallPtrSet< const CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
The scope used to remap some variables as private in the OpenMP loop body (or other captured region e...
SmallVector< Address, 1 > SEHCodeSlotStack
A stack of exception code slots.
Represents a member of a struct/union/class.
Definition: Decl.h:2532
Definition: Format.h:2031
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
bool isReferenceType() const
Definition: Type.h:6118
Helper class with most of the code for saving a value for a conditional expression cleanup...
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code...
This represents &#39;#pragma omp target simd&#39; directive.
Definition: StmtOpenMP.h:3348
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:934
Defines some OpenMP-specific enums and functions.
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:4874
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself...
A metaprogramming class for ensuring that a value will dominate an arbitrary position in a function...
Definition: EHScopeStack.h:66
This represents &#39;#pragma omp barrier&#39; directive.
Definition: StmtOpenMP.h:1847
CleanupKind getCleanupKind(QualType::DestructionKind kind)
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:50
ObjCArrayLiteral - used for objective-c array containers; as in: @["Hello", NSApp, [NSNumber numberWithInt:42]];.
Definition: ExprObjC.h:171
The this pointer adjustment as well as an optional return adjustment for a thunk. ...
Definition: ABI.h:179
This is a common base class for loop directives (&#39;omp simd&#39;, &#39;omp for&#39;, &#39;omp for simd&#39; etc...
Definition: StmtOpenMP.h:336
This represents &#39;#pragma omp critical&#39; directive.
Definition: StmtOpenMP.h:1442
const AstTypeMatcher< ComplexType > complexType
Matches C99 complex types.
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition: CGExpr.cpp:194
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
OpenMPDistScheduleClauseKind
OpenMP attributes for &#39;dist_schedule&#39; clause.
Definition: OpenMPKinds.h:100
bool isGLValue() const
Definition: Expr.h:252
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:1328
Describes an C or C++ initializer list.
Definition: Expr.h:3956
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:639
This represents &#39;#pragma omp distribute parallel for&#39; composite directive.
Definition: StmtOpenMP.h:3063
void setCurrentRegionCount(uint64_t Count)
Set the counter value for the current region.
Definition: CodeGenPGO.h:60
A class controlling the emission of a finally block.
This represents &#39;#pragma omp teams distribute parallel for simd&#39; composite directive.
Definition: StmtOpenMP.h:3552
BinaryOperatorKind
static bool hasScalarEvaluationKind(QualType T)
ForStmt - This represents a &#39;for (init;cond;inc)&#39; stmt.
Definition: Stmt.h:1223
InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
ObjCContainerDecl - Represents a container for method declarations.
Definition: DeclObjC.h:986
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
llvm::function_ref< std::pair< LValue, LValue > CodeGenFunction &, const OMPExecutableDirective &S)> CodeGenLoopBoundsTy
CGCapturedStmtRAII(CodeGenFunction &CGF, CGCapturedStmtInfo *NewCapturedStmtInfo)
LexicalScope(CodeGenFunction &CGF, SourceRange Range)
Enter a new cleanup scope.
RAII for correct setting/restoring of CapturedStmtInfo.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type...
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:67
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
Represents a declaration of a type.
Definition: Decl.h:2827
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3069
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind...
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv)
void restore(CodeGenFunction &CGF)
Restores original addresses of the variables.
CXXForRangeStmt - This represents C++0x [stmt.ranged]&#39;s ranged for statement, represented as &#39;for (ra...
Definition: StmtCXX.h:128
#define LIST_SANITIZER_CHECKS
This represents &#39;#pragma omp cancellation point&#39; directive.
Definition: StmtOpenMP.h:2682
bool operator>(const TargetMultiVersionResolverOption &Other) const
TargetMultiVersionResolverOption(const TargetInfo &TargInfo, llvm::Function *F, const clang::TargetAttr::ParsedTargetAttr &PT)
ObjCStringLiteral, used for Objective-C string literals i.e.
Definition: ExprObjC.h:51
field_iterator field_begin() const
Definition: Decl.cpp:4032
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Definition: EHScopeStack.h:100
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
This represents &#39;#pragma omp teams&#39; directive.
Definition: StmtOpenMP.h:2625
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:85
Enums/classes describing ABI related information about constructors, destructors and thunks...
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:2788
This represents &#39;#pragma omp teams distribute simd&#39; combined directive.
Definition: StmtOpenMP.h:3482
void ForceCleanup(std::initializer_list< llvm::Value **> ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1197
A C++ lambda expression, which produces a function object (of unspecified type) that can be invoked l...
Definition: ExprCXX.h:1584
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
Controls insertion of cancellation exit blocks in worksharing constructs.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler&#39;s counter for the given statement by StepV.
uint64_t getCurrentProfileCount()
Get the profiler&#39;s current count.
CallLifetimeEnd(Address addr, llvm::Value *size)
llvm::function_ref< std::pair< llvm::Value *, llvm::Value * > CodeGenFunction &, const OMPExecutableDirective &S, Address LB, Address UB)> CodeGenDispatchBoundsTy
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:106
Represents an ObjC class declaration.
Definition: DeclObjC.h:1191
Checking the operand of a cast to a virtual base object.
JumpDest getJumpDestInCurrentScope(StringRef Name=StringRef())
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
Checking the operand of a load. Must be suitably sized and aligned.
~LexicalScope()
Exit this cleanup scope, emitting any accumulated cleanups.
Checking the &#39;this&#39; pointer for a call to a non-static member function.
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
ObjCPropertyImplDecl - Represents implementation declaration of a property in a class or category imp...
Definition: DeclObjC.h:2778
This represents &#39;#pragma omp target parallel for simd&#39; directive.
Definition: StmtOpenMP.h:3280
OpenMP 4.0 [2.4, Array Sections].
Definition: ExprOpenMP.h:45
Const iterator for iterating over Stmt * arrays that contain only Expr *.
Definition: Stmt.h:357
bool isValid() const
Definition: Address.h:36
CleanupKind Kind
The kind of cleanup to push: a value from the CleanupKind enumeration.
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition: ExprCXX.h:2201
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:609
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3426
Describes the capture of either a variable, or &#39;this&#39;, or variable-length array type.
Definition: Stmt.h:2083
void EmitAlignmentAssumption(llvm::Value *PtrValue, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
This represents &#39;#pragma omp taskgroup&#39; directive.
Definition: StmtOpenMP.h:1935
const TargetCodeGenInfo & getTargetCodeGenInfo()
CGBlockInfo - Information to generate a block literal.
Definition: CGBlocks.h:150
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:39
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
bool isGlobalVarCaptured(const VarDecl *VD) const
Checks if the global variable is captured in current function.
The class used to assign some variables some temporarily addresses.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:3862
AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression.
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
This represents &#39;#pragma omp distribute&#39; directive.
Definition: StmtOpenMP.h:2936
Exposes information about the current target.
Definition: TargetInfo.h:54
CXXDtorType
C++ destructor types.
Definition: ABI.h:34
bool addPrivate(const VarDecl *LocalVD, const llvm::function_ref< Address()> PrivateGen)
Registers LocalVD variable as a private and apply PrivateGen function for it to generate correspondin...
EHScopeStack::stable_iterator getScopeDepth() const
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:636
Expr - This represents one expression.
Definition: Expr.h:106
Address getOriginalAllocatedAddress() const
Returns the address for the original alloca instruction.
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups...
Definition: EHScopeStack.h:356
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited...
llvm::function_ref< void(CodeGenFunction &, SourceLocation, const unsigned, const bool)> CodeGenOrderedTy
static ParamValue forIndirect(Address addr)
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, RValue rvalue)
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:4935
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2698
This represents &#39;#pragma omp target teams distribute parallel for simd&#39; combined directive.
Definition: StmtOpenMP.h:3912
static saved_type save(CodeGenFunction &CGF, type value)
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp")
CreateAggTemp - Create a temporary memory object for the given aggregate type.
#define bool
Definition: stdbool.h:31
unsigned Size
The size of the following cleanup object.
ObjCDictionaryLiteral - AST node to represent objective-c dictionary literals; as in:"name" : NSUserN...
Definition: ExprObjC.h:288
DeclContext * getDeclContext()
Definition: DeclBase.h:426
Represents Objective-C&#39;s @synchronized statement.
Definition: StmtObjC.h:262
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:429
CXXTryStmt - A C++ try block, including all handlers.
Definition: StmtCXX.h:65
~OMPPrivateScope()
Exit scope - all the mapped variables are restored.
This represents &#39;#pragma omp target teams distribute simd&#39; combined directive.
Definition: StmtOpenMP.h:3985
int Depth
Definition: ASTDiff.cpp:191
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
llvm::LLVMContext & getLLVMContext()
QualType getType() const
Definition: Expr.h:128
Checking the value assigned to a _Nonnull pointer. Must not be null.
An RAII object to record that we&#39;re evaluating a statement expression.
This represents &#39;#pragma omp for&#39; directive.
Definition: StmtOpenMP.h:1068
ReturnStmt - This represents a return, optionally of an expression: return; return 4;...
Definition: Stmt.h:1433
This represents &#39;#pragma omp target teams&#39; directive.
Definition: StmtOpenMP.h:3701
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:903
JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth, unsigned Index)
SourceLocation getEnd() const
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
UnaryOperator - This represents the unary-expression&#39;s (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1782
OMPTargetDataInfo(Address BasePointersArray, Address PointersArray, Address SizesArray, unsigned NumberOfTargetItems)
This represents &#39;#pragma omp cancel&#39; directive.
Definition: StmtOpenMP.h:2740
RunCleanupsScope(CodeGenFunction &CGF)
Enter a new cleanup scope.
const LangOptions & getLangOpts() const
ASTContext & getContext() const
do v
Definition: arm_acle.h:78