clang  9.0.0svn
CodeGenFunction.h
Go to the documentation of this file.
1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the internal per-function state used for llvm translation.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14 #define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15 
16 #include "CGBuilder.h"
17 #include "CGDebugInfo.h"
18 #include "CGLoopInfo.h"
19 #include "CGValue.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "EHScopeStack.h"
23 #include "VarBypassDetector.h"
24 #include "clang/AST/CharUnits.h"
26 #include "clang/AST/ExprCXX.h"
27 #include "clang/AST/ExprObjC.h"
28 #include "clang/AST/ExprOpenMP.h"
29 #include "clang/AST/Type.h"
30 #include "clang/Basic/ABI.h"
34 #include "clang/Basic/TargetInfo.h"
35 #include "llvm/ADT/ArrayRef.h"
36 #include "llvm/ADT/DenseMap.h"
37 #include "llvm/ADT/MapVector.h"
38 #include "llvm/ADT/SmallVector.h"
39 #include "llvm/IR/ValueHandle.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Transforms/Utils/SanitizerStats.h"
42 
43 namespace llvm {
44 class BasicBlock;
45 class LLVMContext;
46 class MDNode;
47 class Module;
48 class SwitchInst;
49 class Twine;
50 class Value;
51 }
52 
53 namespace clang {
54 class ASTContext;
55 class BlockDecl;
56 class CXXDestructorDecl;
57 class CXXForRangeStmt;
58 class CXXTryStmt;
59 class Decl;
60 class LabelDecl;
61 class EnumConstantDecl;
62 class FunctionDecl;
63 class FunctionProtoType;
64 class LabelStmt;
65 class ObjCContainerDecl;
66 class ObjCInterfaceDecl;
67 class ObjCIvarDecl;
68 class ObjCMethodDecl;
69 class ObjCImplementationDecl;
70 class ObjCPropertyImplDecl;
71 class TargetInfo;
72 class VarDecl;
73 class ObjCForCollectionStmt;
74 class ObjCAtTryStmt;
75 class ObjCAtThrowStmt;
76 class ObjCAtSynchronizedStmt;
77 class ObjCAutoreleasePoolStmt;
78 
79 namespace analyze_os_log {
80 class OSLogBufferLayout;
81 }
82 
83 namespace CodeGen {
84 class CodeGenTypes;
85 class CGCallee;
86 class CGFunctionInfo;
87 class CGRecordLayout;
88 class CGBlockInfo;
89 class CGCXXABI;
90 class BlockByrefHelpers;
91 class BlockByrefInfo;
92 class BlockFlags;
93 class BlockFieldFlags;
94 class RegionCodeGenTy;
95 class TargetCodeGenInfo;
96 struct OMPTaskDataTy;
97 struct CGCoroData;
98 
99 /// The kind of evaluation to perform on values of a particular
100 /// type. Basically, is the code in CGExprScalar, CGExprComplex, or
101 /// CGExprAgg?
102 ///
103 /// TODO: should vectors maybe be split out into their own thing?
108 };
109 
110 #define LIST_SANITIZER_CHECKS \
111  SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
112  SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
113  SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
114  SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
115  SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
116  SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
117  SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
118  SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
119  SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
120  SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
121  SANITIZER_CHECK(MissingReturn, missing_return, 0) \
122  SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
123  SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
124  SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
125  SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
126  SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
127  SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
128  SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
129  SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
130  SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
131  SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
132  SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
133  SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
134  SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
135 
137 #define SANITIZER_CHECK(Enum, Name, Version) Enum,
139 #undef SANITIZER_CHECK
140 };
141 
142 /// Helper class with most of the code for saving a value for a
143 /// conditional expression cleanup.
145  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
146 
147  /// Answer whether the given value needs extra work to be saved.
148  static bool needsSaving(llvm::Value *value) {
149  // If it's not an instruction, we don't need to save.
150  if (!isa<llvm::Instruction>(value)) return false;
151 
152  // If it's an instruction in the entry block, we don't need to save.
153  llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
154  return (block != &block->getParent()->getEntryBlock());
155  }
156 
157  static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
158  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
159 };
160 
161 /// A partial specialization of DominatingValue for llvm::Values that
162 /// might be llvm::Instructions.
163 template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
164  typedef T *type;
165  static type restore(CodeGenFunction &CGF, saved_type value) {
166  return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
167  }
168 };
169 
170 /// A specialization of DominatingValue for Address.
171 template <> struct DominatingValue<Address> {
172  typedef Address type;
173 
174  struct saved_type {
177  };
178 
179  static bool needsSaving(type value) {
180  return DominatingLLVMValue::needsSaving(value.getPointer());
181  }
182  static saved_type save(CodeGenFunction &CGF, type value) {
183  return { DominatingLLVMValue::save(CGF, value.getPointer()),
184  value.getAlignment() };
185  }
186  static type restore(CodeGenFunction &CGF, saved_type value) {
187  return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
188  value.Alignment);
189  }
190 };
191 
192 /// A specialization of DominatingValue for RValue.
193 template <> struct DominatingValue<RValue> {
194  typedef RValue type;
195  class saved_type {
196  enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
197  AggregateAddress, ComplexAddress };
198 
200  unsigned K : 3;
201  unsigned Align : 29;
202  saved_type(llvm::Value *v, Kind k, unsigned a = 0)
203  : Value(v), K(k), Align(a) {}
204 
205  public:
206  static bool needsSaving(RValue value);
207  static saved_type save(CodeGenFunction &CGF, RValue value);
208  RValue restore(CodeGenFunction &CGF);
209 
210  // implementations in CGCleanup.cpp
211  };
212 
213  static bool needsSaving(type value) {
214  return saved_type::needsSaving(value);
215  }
216  static saved_type save(CodeGenFunction &CGF, type value) {
217  return saved_type::save(CGF, value);
218  }
219  static type restore(CodeGenFunction &CGF, saved_type value) {
220  return value.restore(CGF);
221  }
222 };
223 
224 /// CodeGenFunction - This class organizes the per-function state that is used
225 /// while generating LLVM code.
227  CodeGenFunction(const CodeGenFunction &) = delete;
228  void operator=(const CodeGenFunction &) = delete;
229 
230  friend class CGCXXABI;
231 public:
232  /// A jump destination is an abstract label, branching to which may
233  /// require a jump out through normal cleanups.
234  struct JumpDest {
235  JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
236  JumpDest(llvm::BasicBlock *Block,
238  unsigned Index)
239  : Block(Block), ScopeDepth(Depth), Index(Index) {}
240 
241  bool isValid() const { return Block != nullptr; }
242  llvm::BasicBlock *getBlock() const { return Block; }
243  EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
244  unsigned getDestIndex() const { return Index; }
245 
246  // This should be used cautiously.
248  ScopeDepth = depth;
249  }
250 
251  private:
252  llvm::BasicBlock *Block;
254  unsigned Index;
255  };
256 
257  CodeGenModule &CGM; // Per-module state.
259 
260  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
263 
264  // Stores variables for which we can't generate correct lifetime markers
265  // because of jumps.
267 
268  // CodeGen lambda for loops and support for ordered clause
269  typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
270  JumpDest)>
272  typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
273  const unsigned, const bool)>
275 
276  // Codegen lambda for loop bounds in worksharing loop constructs
277  typedef llvm::function_ref<std::pair<LValue, LValue>(
280 
281  // Codegen lambda for loop bounds in dispatch-based loop implementation
282  typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
283  CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
284  Address UB)>
286 
287  /// CGBuilder insert helper. This function is called after an
288  /// instruction is created using Builder.
289  void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
290  llvm::BasicBlock *BB,
291  llvm::BasicBlock::iterator InsertPt) const;
292 
293  /// CurFuncDecl - Holds the Decl for the current outermost
294  /// non-closure context.
296  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
300  llvm::Function *CurFn = nullptr;
301 
302  // Holds coroutine data if the current function is a coroutine. We use a
303  // wrapper to manage its lifetime, so that we don't have to define CGCoroData
304  // in this header.
305  struct CGCoroInfo {
306  std::unique_ptr<CGCoroData> Data;
307  CGCoroInfo();
308  ~CGCoroInfo();
309  };
311 
312  bool isCoroutine() const {
313  return CurCoro.Data != nullptr;
314  }
315 
316  /// CurGD - The GlobalDecl for the current function being compiled.
318 
319  /// PrologueCleanupDepth - The cleanup depth enclosing all the
320  /// cleanups associated with the parameters.
322 
323  /// ReturnBlock - Unified return block.
325 
326  /// ReturnValue - The temporary alloca to hold the return
327  /// value. This is invalid iff the function has no return value.
328  Address ReturnValue = Address::invalid();
329 
330  /// Return true if a label was seen in the current scope.
332  if (CurLexicalScope)
333  return CurLexicalScope->hasLabels();
334  return !LabelMap.empty();
335  }
336 
337  /// AllocaInsertPoint - This is an instruction in the entry block before which
338  /// we prefer to insert allocas.
339  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
340 
341  /// API for captured statement code generation.
343  public:
345  : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
346  explicit CGCapturedStmtInfo(const CapturedStmt &S,
348  : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
349 
353  E = S.capture_end();
354  I != E; ++I, ++Field) {
355  if (I->capturesThis())
356  CXXThisFieldDecl = *Field;
357  else if (I->capturesVariable())
358  CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
359  else if (I->capturesVariableByCopy())
360  CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
361  }
362  }
363 
364  virtual ~CGCapturedStmtInfo();
365 
366  CapturedRegionKind getKind() const { return Kind; }
367 
368  virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
369  // Retrieve the value of the context parameter.
370  virtual llvm::Value *getContextValue() const { return ThisValue; }
371 
372  /// Lookup the captured field decl for a variable.
373  virtual const FieldDecl *lookup(const VarDecl *VD) const {
374  return CaptureFields.lookup(VD->getCanonicalDecl());
375  }
376 
377  bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
378  virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
379 
380  static bool classof(const CGCapturedStmtInfo *) {
381  return true;
382  }
383 
384  /// Emit the captured statement body.
385  virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
387  CGF.EmitStmt(S);
388  }
389 
390  /// Get the name of the capture helper.
391  virtual StringRef getHelperName() const { return "__captured_stmt"; }
392 
393  private:
394  /// The kind of captured statement being generated.
396 
397  /// Keep the map between VarDecl and FieldDecl.
398  llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
399 
400  /// The base address of the captured record, passed in as the first
401  /// argument of the parallel region function.
402  llvm::Value *ThisValue;
403 
404  /// Captured 'this' type.
405  FieldDecl *CXXThisFieldDecl;
406  };
407  CGCapturedStmtInfo *CapturedStmtInfo = nullptr;
408 
409  /// RAII for correct setting/restoring of CapturedStmtInfo.
411  private:
412  CodeGenFunction &CGF;
413  CGCapturedStmtInfo *PrevCapturedStmtInfo;
414  public:
415  CGCapturedStmtRAII(CodeGenFunction &CGF,
416  CGCapturedStmtInfo *NewCapturedStmtInfo)
417  : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
418  CGF.CapturedStmtInfo = NewCapturedStmtInfo;
419  }
420  ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
421  };
422 
423  /// An abstract representation of regular/ObjC call/message targets.
425  /// The function declaration of the callee.
426  const Decl *CalleeDecl;
427 
428  public:
429  AbstractCallee() : CalleeDecl(nullptr) {}
430  AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
431  AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
432  bool hasFunctionDecl() const {
433  return dyn_cast_or_null<FunctionDecl>(CalleeDecl);
434  }
435  const Decl *getDecl() const { return CalleeDecl; }
436  unsigned getNumParams() const {
437  if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
438  return FD->getNumParams();
439  return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
440  }
441  const ParmVarDecl *getParamDecl(unsigned I) const {
442  if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
443  return FD->getParamDecl(I);
444  return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
445  }
446  };
447 
448  /// Sanitizers enabled for this function.
450 
451  /// True if CodeGen currently emits code implementing sanitizer checks.
452  bool IsSanitizerScope = false;
453 
454  /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
456  CodeGenFunction *CGF;
457  public:
458  SanitizerScope(CodeGenFunction *CGF);
459  ~SanitizerScope();
460  };
461 
462  /// In C++, whether we are code generating a thunk. This controls whether we
463  /// should emit cleanups.
464  bool CurFuncIsThunk = false;
465 
466  /// In ARC, whether we should autorelease the return value.
467  bool AutoreleaseResult = false;
468 
469  /// Whether we processed a Microsoft-style asm block during CodeGen. These can
470  /// potentially set the return value.
471  bool SawAsmBlock = false;
472 
473  const NamedDecl *CurSEHParent = nullptr;
474 
475  /// True if the current function is an outlined SEH helper. This can be a
476  /// finally block or filter expression.
477  bool IsOutlinedSEHHelper = false;
478 
479  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
480  llvm::Value *BlockPointer = nullptr;
481 
482  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
483  FieldDecl *LambdaThisCaptureField = nullptr;
484 
485  /// A mapping from NRVO variables to the flags used to indicate
486  /// when the NRVO has been applied to this variable.
487  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
488 
492 
493  llvm::Instruction *CurrentFuncletPad = nullptr;
494 
495  class CallLifetimeEnd final : public EHScopeStack::Cleanup {
496  llvm::Value *Addr;
497  llvm::Value *Size;
498 
499  public:
501  : Addr(addr.getPointer()), Size(size) {}
502 
503  void Emit(CodeGenFunction &CGF, Flags flags) override {
504  CGF.EmitLifetimeEnd(Size, Addr);
505  }
506  };
507 
508  /// Header for data within LifetimeExtendedCleanupStack.
510  /// The size of the following cleanup object.
511  unsigned Size;
512  /// The kind of cleanup to push: a value from the CleanupKind enumeration.
513  unsigned Kind : 31;
514  /// Whether this is a conditional cleanup.
515  unsigned IsConditional : 1;
516 
517  size_t getSize() const { return Size; }
518  CleanupKind getKind() const { return (CleanupKind)Kind; }
519  bool isConditional() const { return IsConditional; }
520  };
521 
522  /// i32s containing the indexes of the cleanup destinations.
523  Address NormalCleanupDest = Address::invalid();
524 
525  unsigned NextCleanupDestIndex = 1;
526 
527  /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
528  CGBlockInfo *FirstBlockInfo = nullptr;
529 
530  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
531  llvm::BasicBlock *EHResumeBlock = nullptr;
532 
533  /// The exception slot. All landing pads write the current exception pointer
534  /// into this alloca.
535  llvm::Value *ExceptionSlot = nullptr;
536 
537  /// The selector slot. Under the MandatoryCleanup model, all landing pads
538  /// write the current selector value into this alloca.
539  llvm::AllocaInst *EHSelectorSlot = nullptr;
540 
541  /// A stack of exception code slots. Entering an __except block pushes a slot
542  /// on the stack and leaving pops one. The __exception_code() intrinsic loads
543  /// a value from the top of the stack.
545 
546  /// Value returned by __exception_info intrinsic.
547  llvm::Value *SEHInfo = nullptr;
548 
549  /// Emits a landing pad for the current EH stack.
550  llvm::BasicBlock *EmitLandingPad();
551 
552  llvm::BasicBlock *getInvokeDestImpl();
553 
554  template <class T>
556  return DominatingValue<T>::save(*this, value);
557  }
558 
559 public:
560  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
561  /// rethrows.
563 
564  /// A class controlling the emission of a finally block.
565  class FinallyInfo {
566  /// Where the catchall's edge through the cleanup should go.
567  JumpDest RethrowDest;
568 
569  /// A function to call to enter the catch.
570  llvm::FunctionCallee BeginCatchFn;
571 
572  /// An i1 variable indicating whether or not the @finally is
573  /// running for an exception.
574  llvm::AllocaInst *ForEHVar;
575 
576  /// An i8* variable into which the exception pointer to rethrow
577  /// has been saved.
578  llvm::AllocaInst *SavedExnVar;
579 
580  public:
581  void enter(CodeGenFunction &CGF, const Stmt *Finally,
582  llvm::FunctionCallee beginCatchFn,
583  llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
584  void exit(CodeGenFunction &CGF);
585  };
586 
587  /// Returns true inside SEH __try blocks.
588  bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
589 
590  /// Returns true while emitting a cleanuppad.
591  bool isCleanupPadScope() const {
592  return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
593  }
594 
595  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
596  /// current full-expression. Safe against the possibility that
597  /// we're currently inside a conditionally-evaluated expression.
598  template <class T, class... As>
600  // If we're not in a conditional branch, or if none of the
601  // arguments requires saving, then use the unconditional cleanup.
602  if (!isInConditionalBranch())
603  return EHStack.pushCleanup<T>(kind, A...);
604 
605  // Stash values in a tuple so we can guarantee the order of saves.
606  typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
607  SavedTuple Saved{saveValueInCond(A)...};
608 
609  typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
610  EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
611  initFullExprCleanup();
612  }
613 
614  /// Queue a cleanup to be pushed after finishing the current
615  /// full-expression.
616  template <class T, class... As>
618  if (!isInConditionalBranch())
619  return pushCleanupAfterFullExprImpl<T>(Kind, Address::invalid(), A...);
620 
621  Address ActiveFlag = createCleanupActiveFlag();
622  assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
623  "cleanup active flag should never need saving");
624 
625  typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
626  SavedTuple Saved{saveValueInCond(A)...};
627 
628  typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
629  pushCleanupAfterFullExprImpl<CleanupType>(Kind, ActiveFlag, Saved);
630  }
631 
632  template <class T, class... As>
634  As... A) {
635  LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
636  ActiveFlag.isValid()};
637 
638  size_t OldSize = LifetimeExtendedCleanupStack.size();
639  LifetimeExtendedCleanupStack.resize(
640  LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
641  (Header.IsConditional ? sizeof(ActiveFlag) : 0));
642 
643  static_assert(sizeof(Header) % alignof(T) == 0,
644  "Cleanup will be allocated on misaligned address");
645  char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
646  new (Buffer) LifetimeExtendedCleanupHeader(Header);
647  new (Buffer + sizeof(Header)) T(A...);
648  if (Header.IsConditional)
649  new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag);
650  }
651 
652  /// Set up the last cleanup that was pushed as a conditional
653  /// full-expression cleanup.
655  initFullExprCleanupWithFlag(createCleanupActiveFlag());
656  }
657 
658  void initFullExprCleanupWithFlag(Address ActiveFlag);
659  Address createCleanupActiveFlag();
660 
661  /// PushDestructorCleanup - Push a cleanup to call the
662  /// complete-object destructor of an object of the given type at the
663  /// given address. Does nothing if T is not a C++ class type with a
664  /// non-trivial destructor.
665  void PushDestructorCleanup(QualType T, Address Addr);
666 
667  /// PushDestructorCleanup - Push a cleanup to call the
668  /// complete-object variant of the given destructor on the object at
669  /// the given address.
670  void PushDestructorCleanup(const CXXDestructorDecl *Dtor, Address Addr);
671 
672  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
673  /// process all branch fixups.
674  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
675 
676  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
677  /// The block cannot be reactivated. Pops it if it's the top of the
678  /// stack.
679  ///
680  /// \param DominatingIP - An instruction which is known to
681  /// dominate the current IP (if set) and which lies along
682  /// all paths of execution between the current IP and the
683  /// the point at which the cleanup comes into scope.
684  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
685  llvm::Instruction *DominatingIP);
686 
687  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
688  /// Cannot be used to resurrect a deactivated cleanup.
689  ///
690  /// \param DominatingIP - An instruction which is known to
691  /// dominate the current IP (if set) and which lies along
692  /// all paths of execution between the current IP and the
693  /// the point at which the cleanup comes into scope.
694  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
695  llvm::Instruction *DominatingIP);
696 
697  /// Enters a new scope for capturing cleanups, all of which
698  /// will be executed once the scope is exited.
700  EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
701  size_t LifetimeExtendedCleanupStackSize;
702  bool OldDidCallStackSave;
703  protected:
705  private:
706 
707  RunCleanupsScope(const RunCleanupsScope &) = delete;
708  void operator=(const RunCleanupsScope &) = delete;
709 
710  protected:
711  CodeGenFunction& CGF;
712 
713  public:
714  /// Enter a new cleanup scope.
715  explicit RunCleanupsScope(CodeGenFunction &CGF)
716  : PerformCleanup(true), CGF(CGF)
717  {
718  CleanupStackDepth = CGF.EHStack.stable_begin();
719  LifetimeExtendedCleanupStackSize =
720  CGF.LifetimeExtendedCleanupStack.size();
721  OldDidCallStackSave = CGF.DidCallStackSave;
722  CGF.DidCallStackSave = false;
723  OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
724  CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
725  }
726 
727  /// Exit this cleanup scope, emitting any accumulated cleanups.
729  if (PerformCleanup)
730  ForceCleanup();
731  }
732 
733  /// Determine whether this scope requires any cleanups.
734  bool requiresCleanups() const {
735  return CGF.EHStack.stable_begin() != CleanupStackDepth;
736  }
737 
738  /// Force the emission of cleanups now, instead of waiting
739  /// until this object is destroyed.
740  /// \param ValuesToReload - A list of values that need to be available at
741  /// the insertion point after cleanup emission. If cleanup emission created
742  /// a shared cleanup block, these value pointers will be rewritten.
743  /// Otherwise, they not will be modified.
744  void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
745  assert(PerformCleanup && "Already forced cleanup");
746  CGF.DidCallStackSave = OldDidCallStackSave;
747  CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
748  ValuesToReload);
749  PerformCleanup = false;
750  CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
751  }
752  };
753 
754  // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
755  EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
756  EHScopeStack::stable_end();
757 
759  SourceRange Range;
761  LexicalScope *ParentScope;
762 
763  LexicalScope(const LexicalScope &) = delete;
764  void operator=(const LexicalScope &) = delete;
765 
766  public:
767  /// Enter a new cleanup scope.
768  explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
769  : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
770  CGF.CurLexicalScope = this;
771  if (CGDebugInfo *DI = CGF.getDebugInfo())
772  DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
773  }
774 
775  void addLabel(const LabelDecl *label) {
776  assert(PerformCleanup && "adding label to dead scope?");
777  Labels.push_back(label);
778  }
779 
780  /// Exit this cleanup scope, emitting any accumulated
781  /// cleanups.
783  if (CGDebugInfo *DI = CGF.getDebugInfo())
784  DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
785 
786  // If we should perform a cleanup, force them now. Note that
787  // this ends the cleanup scope before rescoping any labels.
788  if (PerformCleanup) {
789  ApplyDebugLocation DL(CGF, Range.getEnd());
790  ForceCleanup();
791  }
792  }
793 
794  /// Force the emission of cleanups now, instead of waiting
795  /// until this object is destroyed.
796  void ForceCleanup() {
797  CGF.CurLexicalScope = ParentScope;
798  RunCleanupsScope::ForceCleanup();
799 
800  if (!Labels.empty())
801  rescopeLabels();
802  }
803 
804  bool hasLabels() const {
805  return !Labels.empty();
806  }
807 
808  void rescopeLabels();
809  };
810 
811  typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
812 
813  /// The class used to assign some variables some temporarily addresses.
814  class OMPMapVars {
815  DeclMapTy SavedLocals;
816  DeclMapTy SavedTempAddresses;
817  OMPMapVars(const OMPMapVars &) = delete;
818  void operator=(const OMPMapVars &) = delete;
819 
820  public:
821  explicit OMPMapVars() = default;
823  assert(SavedLocals.empty() && "Did not restored original addresses.");
824  };
825 
826  /// Sets the address of the variable \p LocalVD to be \p TempAddr in
827  /// function \p CGF.
828  /// \return true if at least one variable was set already, false otherwise.
829  bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
830  Address TempAddr) {
831  LocalVD = LocalVD->getCanonicalDecl();
832  // Only save it once.
833  if (SavedLocals.count(LocalVD)) return false;
834 
835  // Copy the existing local entry to SavedLocals.
836  auto it = CGF.LocalDeclMap.find(LocalVD);
837  if (it != CGF.LocalDeclMap.end())
838  SavedLocals.try_emplace(LocalVD, it->second);
839  else
840  SavedLocals.try_emplace(LocalVD, Address::invalid());
841 
842  // Generate the private entry.
843  QualType VarTy = LocalVD->getType();
844  if (VarTy->isReferenceType()) {
845  Address Temp = CGF.CreateMemTemp(VarTy);
846  CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
847  TempAddr = Temp;
848  }
849  SavedTempAddresses.try_emplace(LocalVD, TempAddr);
850 
851  return true;
852  }
853 
854  /// Applies new addresses to the list of the variables.
855  /// \return true if at least one variable is using new address, false
856  /// otherwise.
857  bool apply(CodeGenFunction &CGF) {
858  copyInto(SavedTempAddresses, CGF.LocalDeclMap);
859  SavedTempAddresses.clear();
860  return !SavedLocals.empty();
861  }
862 
863  /// Restores original addresses of the variables.
864  void restore(CodeGenFunction &CGF) {
865  if (!SavedLocals.empty()) {
866  copyInto(SavedLocals, CGF.LocalDeclMap);
867  SavedLocals.clear();
868  }
869  }
870 
871  private:
872  /// Copy all the entries in the source map over the corresponding
873  /// entries in the destination, which must exist.
874  static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
875  for (auto &Pair : Src) {
876  if (!Pair.second.isValid()) {
877  Dest.erase(Pair.first);
878  continue;
879  }
880 
881  auto I = Dest.find(Pair.first);
882  if (I != Dest.end())
883  I->second = Pair.second;
884  else
885  Dest.insert(Pair);
886  }
887  }
888  };
889 
890  /// The scope used to remap some variables as private in the OpenMP loop body
891  /// (or other captured region emitted without outlining), and to restore old
892  /// vars back on exit.
894  OMPMapVars MappedVars;
895  OMPPrivateScope(const OMPPrivateScope &) = delete;
896  void operator=(const OMPPrivateScope &) = delete;
897 
898  public:
899  /// Enter a new OpenMP private scope.
900  explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
901 
902  /// Registers \p LocalVD variable as a private and apply \p PrivateGen
903  /// function for it to generate corresponding private variable. \p
904  /// PrivateGen returns an address of the generated private variable.
905  /// \return true if the variable is registered as private, false if it has
906  /// been privatized already.
907  bool addPrivate(const VarDecl *LocalVD,
908  const llvm::function_ref<Address()> PrivateGen) {
909  assert(PerformCleanup && "adding private to dead scope");
910  return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
911  }
912 
913  /// Privatizes local variables previously registered as private.
914  /// Registration is separate from the actual privatization to allow
915  /// initializers use values of the original variables, not the private one.
916  /// This is important, for example, if the private variable is a class
917  /// variable initialized by a constructor that references other private
918  /// variables. But at initialization original variables must be used, not
919  /// private copies.
920  /// \return true if at least one variable was privatized, false otherwise.
921  bool Privatize() { return MappedVars.apply(CGF); }
922 
923  void ForceCleanup() {
924  RunCleanupsScope::ForceCleanup();
925  MappedVars.restore(CGF);
926  }
927 
928  /// Exit scope - all the mapped variables are restored.
930  if (PerformCleanup)
931  ForceCleanup();
932  }
933 
934  /// Checks if the global variable is captured in current function.
935  bool isGlobalVarCaptured(const VarDecl *VD) const {
936  VD = VD->getCanonicalDecl();
937  return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
938  }
939  };
940 
941  /// Takes the old cleanup stack size and emits the cleanup blocks
942  /// that have been added.
943  void
944  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
945  std::initializer_list<llvm::Value **> ValuesToReload = {});
946 
947  /// Takes the old cleanup stack size and emits the cleanup blocks
948  /// that have been added, then adds all lifetime-extended cleanups from
949  /// the given position to the stack.
950  void
951  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
952  size_t OldLifetimeExtendedStackSize,
953  std::initializer_list<llvm::Value **> ValuesToReload = {});
954 
955  void ResolveBranchFixups(llvm::BasicBlock *Target);
956 
957  /// The given basic block lies in the current EH scope, but may be a
958  /// target of a potentially scope-crossing jump; get a stable handle
959  /// to which we can perform this jump later.
960  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
961  return JumpDest(Target,
962  EHStack.getInnermostNormalCleanup(),
963  NextCleanupDestIndex++);
964  }
965 
966  /// The given basic block lies in the current EH scope, but may be a
967  /// target of a potentially scope-crossing jump; get a stable handle
968  /// to which we can perform this jump later.
969  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
970  return getJumpDestInCurrentScope(createBasicBlock(Name));
971  }
972 
973  /// EmitBranchThroughCleanup - Emit a branch from the current insert
974  /// block through the normal cleanup handling code (if any) and then
975  /// on to \arg Dest.
976  void EmitBranchThroughCleanup(JumpDest Dest);
977 
978  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
979  /// specified destination obviously has no cleanups to run. 'false' is always
980  /// a conservatively correct answer for this method.
981  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
982 
983  /// popCatchScope - Pops the catch scope at the top of the EHScope
984  /// stack, emitting any required code (other than the catch handlers
985  /// themselves).
986  void popCatchScope();
987 
988  llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
989  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
990  llvm::BasicBlock *
991  getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
992 
993  /// An object to manage conditionally-evaluated expressions.
995  llvm::BasicBlock *StartBB;
996 
997  public:
998  ConditionalEvaluation(CodeGenFunction &CGF)
999  : StartBB(CGF.Builder.GetInsertBlock()) {}
1000 
1001  void begin(CodeGenFunction &CGF) {
1002  assert(CGF.OutermostConditional != this);
1003  if (!CGF.OutermostConditional)
1004  CGF.OutermostConditional = this;
1005  }
1006 
1007  void end(CodeGenFunction &CGF) {
1008  assert(CGF.OutermostConditional != nullptr);
1009  if (CGF.OutermostConditional == this)
1010  CGF.OutermostConditional = nullptr;
1011  }
1012 
1013  /// Returns a block which will be executed prior to each
1014  /// evaluation of the conditional code.
1015  llvm::BasicBlock *getStartingBlock() const {
1016  return StartBB;
1017  }
1018  };
1019 
1020  /// isInConditionalBranch - Return true if we're currently emitting
1021  /// one branch or the other of a conditional expression.
1022  bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1023 
1025  assert(isInConditionalBranch());
1026  llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
1027  auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
1028  store->setAlignment(addr.getAlignment().getQuantity());
1029  }
1030 
1031  /// An RAII object to record that we're evaluating a statement
1032  /// expression.
1034  CodeGenFunction &CGF;
1035 
1036  /// We have to save the outermost conditional: cleanups in a
1037  /// statement expression aren't conditional just because the
1038  /// StmtExpr is.
1039  ConditionalEvaluation *SavedOutermostConditional;
1040 
1041  public:
1042  StmtExprEvaluation(CodeGenFunction &CGF)
1043  : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1044  CGF.OutermostConditional = nullptr;
1045  }
1046 
1048  CGF.OutermostConditional = SavedOutermostConditional;
1049  CGF.EnsureInsertPoint();
1050  }
1051  };
1052 
1053  /// An object which temporarily prevents a value from being
1054  /// destroyed by aggressive peephole optimizations that assume that
1055  /// all uses of a value have been realized in the IR.
1057  llvm::Instruction *Inst;
1058  friend class CodeGenFunction;
1059 
1060  public:
1061  PeepholeProtection() : Inst(nullptr) {}
1062  };
1063 
1064  /// A non-RAII class containing all the information about a bound
1065  /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1066  /// this which makes individual mappings very simple; using this
1067  /// class directly is useful when you have a variable number of
1068  /// opaque values or don't want the RAII functionality for some
1069  /// reason.
1071  const OpaqueValueExpr *OpaqueValue;
1072  bool BoundLValue;
1074 
1076  bool boundLValue)
1077  : OpaqueValue(ov), BoundLValue(boundLValue) {}
1078  public:
1079  OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1080 
1081  static bool shouldBindAsLValue(const Expr *expr) {
1082  // gl-values should be bound as l-values for obvious reasons.
1083  // Records should be bound as l-values because IR generation
1084  // always keeps them in memory. Expressions of function type
1085  // act exactly like l-values but are formally required to be
1086  // r-values in C.
1087  return expr->isGLValue() ||
1088  expr->getType()->isFunctionType() ||
1089  hasAggregateEvaluationKind(expr->getType());
1090  }
1091 
1092  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1093  const OpaqueValueExpr *ov,
1094  const Expr *e) {
1095  if (shouldBindAsLValue(ov))
1096  return bind(CGF, ov, CGF.EmitLValue(e));
1097  return bind(CGF, ov, CGF.EmitAnyExpr(e));
1098  }
1099 
1100  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1101  const OpaqueValueExpr *ov,
1102  const LValue &lv) {
1103  assert(shouldBindAsLValue(ov));
1104  CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1105  return OpaqueValueMappingData(ov, true);
1106  }
1107 
1108  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1109  const OpaqueValueExpr *ov,
1110  const RValue &rv) {
1111  assert(!shouldBindAsLValue(ov));
1112  CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1113 
1114  OpaqueValueMappingData data(ov, false);
1115 
1116  // Work around an extremely aggressive peephole optimization in
1117  // EmitScalarConversion which assumes that all other uses of a
1118  // value are extant.
1119  data.Protection = CGF.protectFromPeepholes(rv);
1120 
1121  return data;
1122  }
1123 
1124  bool isValid() const { return OpaqueValue != nullptr; }
1125  void clear() { OpaqueValue = nullptr; }
1126 
1127  void unbind(CodeGenFunction &CGF) {
1128  assert(OpaqueValue && "no data to unbind!");
1129 
1130  if (BoundLValue) {
1131  CGF.OpaqueLValues.erase(OpaqueValue);
1132  } else {
1133  CGF.OpaqueRValues.erase(OpaqueValue);
1134  CGF.unprotectFromPeepholes(Protection);
1135  }
1136  }
1137  };
1138 
1139  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1141  CodeGenFunction &CGF;
1143 
1144  public:
1145  static bool shouldBindAsLValue(const Expr *expr) {
1146  return OpaqueValueMappingData::shouldBindAsLValue(expr);
1147  }
1148 
1149  /// Build the opaque value mapping for the given conditional
1150  /// operator if it's the GNU ?: extension. This is a common
1151  /// enough pattern that the convenience operator is really
1152  /// helpful.
1153  ///
1154  OpaqueValueMapping(CodeGenFunction &CGF,
1155  const AbstractConditionalOperator *op) : CGF(CGF) {
1156  if (isa<ConditionalOperator>(op))
1157  // Leave Data empty.
1158  return;
1159 
1160  const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1161  Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1162  e->getCommon());
1163  }
1164 
1165  /// Build the opaque value mapping for an OpaqueValueExpr whose source
1166  /// expression is set to the expression the OVE represents.
1167  OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
1168  : CGF(CGF) {
1169  if (OV) {
1170  assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1171  "for OVE with no source expression");
1172  Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
1173  }
1174  }
1175 
1176  OpaqueValueMapping(CodeGenFunction &CGF,
1177  const OpaqueValueExpr *opaqueValue,
1178  LValue lvalue)
1179  : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1180  }
1181 
1182  OpaqueValueMapping(CodeGenFunction &CGF,
1183  const OpaqueValueExpr *opaqueValue,
1184  RValue rvalue)
1185  : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1186  }
1187 
1188  void pop() {
1189  Data.unbind(CGF);
1190  Data.clear();
1191  }
1192 
1194  if (Data.isValid()) Data.unbind(CGF);
1195  }
1196  };
1197 
1198 private:
1199  CGDebugInfo *DebugInfo;
1200  /// Used to create unique names for artificial VLA size debug info variables.
1201  unsigned VLAExprCounter = 0;
1202  bool DisableDebugInfo = false;
1203 
1204  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1205  /// calling llvm.stacksave for multiple VLAs in the same scope.
1206  bool DidCallStackSave = false;
1207 
1208  /// IndirectBranch - The first time an indirect goto is seen we create a block
1209  /// with an indirect branch. Every time we see the address of a label taken,
1210  /// we add the label to the indirect goto. Every subsequent indirect goto is
1211  /// codegen'd as a jump to the IndirectBranch's basic block.
1212  llvm::IndirectBrInst *IndirectBranch = nullptr;
1213 
1214  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1215  /// decls.
1216  DeclMapTy LocalDeclMap;
1217 
1218  // Keep track of the cleanups for callee-destructed parameters pushed to the
1219  // cleanup stack so that they can be deactivated later.
1220  llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1221  CalleeDestructedParamCleanups;
1222 
1223  /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1224  /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1225  /// parameter.
1226  llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1227  SizeArguments;
1228 
1229  /// Track escaped local variables with auto storage. Used during SEH
1230  /// outlining to produce a call to llvm.localescape.
1231  llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1232 
1233  /// LabelMap - This keeps track of the LLVM basic block for each C label.
1234  llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1235 
1236  // BreakContinueStack - This keeps track of where break and continue
1237  // statements should jump to.
1238  struct BreakContinue {
1239  BreakContinue(JumpDest Break, JumpDest Continue)
1240  : BreakBlock(Break), ContinueBlock(Continue) {}
1241 
1242  JumpDest BreakBlock;
1243  JumpDest ContinueBlock;
1244  };
1245  SmallVector<BreakContinue, 8> BreakContinueStack;
1246 
1247  /// Handles cancellation exit points in OpenMP-related constructs.
1248  class OpenMPCancelExitStack {
1249  /// Tracks cancellation exit point and join point for cancel-related exit
1250  /// and normal exit.
1251  struct CancelExit {
1252  CancelExit() = default;
1253  CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1254  JumpDest ContBlock)
1255  : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1257  /// true if the exit block has been emitted already by the special
1258  /// emitExit() call, false if the default codegen is used.
1259  bool HasBeenEmitted = false;
1260  JumpDest ExitBlock;
1261  JumpDest ContBlock;
1262  };
1263 
1265 
1266  public:
1267  OpenMPCancelExitStack() : Stack(1) {}
1268  ~OpenMPCancelExitStack() = default;
1269  /// Fetches the exit block for the current OpenMP construct.
1270  JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1271  /// Emits exit block with special codegen procedure specific for the related
1272  /// OpenMP construct + emits code for normal construct cleanup.
1273  void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1274  const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1275  if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1276  assert(CGF.getOMPCancelDestination(Kind).isValid());
1277  assert(CGF.HaveInsertPoint());
1278  assert(!Stack.back().HasBeenEmitted);
1279  auto IP = CGF.Builder.saveAndClearIP();
1280  CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1281  CodeGen(CGF);
1282  CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1283  CGF.Builder.restoreIP(IP);
1284  Stack.back().HasBeenEmitted = true;
1285  }
1286  CodeGen(CGF);
1287  }
1288  /// Enter the cancel supporting \a Kind construct.
1289  /// \param Kind OpenMP directive that supports cancel constructs.
1290  /// \param HasCancel true, if the construct has inner cancel directive,
1291  /// false otherwise.
1292  void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
1293  Stack.push_back({Kind,
1294  HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
1295  : JumpDest(),
1296  HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
1297  : JumpDest()});
1298  }
1299  /// Emits default exit point for the cancel construct (if the special one
1300  /// has not be used) + join point for cancel/normal exits.
1301  void exit(CodeGenFunction &CGF) {
1302  if (getExitBlock().isValid()) {
1303  assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
1304  bool HaveIP = CGF.HaveInsertPoint();
1305  if (!Stack.back().HasBeenEmitted) {
1306  if (HaveIP)
1307  CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1308  CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1309  CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1310  }
1311  CGF.EmitBlock(Stack.back().ContBlock.getBlock());
1312  if (!HaveIP) {
1313  CGF.Builder.CreateUnreachable();
1314  CGF.Builder.ClearInsertionPoint();
1315  }
1316  }
1317  Stack.pop_back();
1318  }
1319  };
1320  OpenMPCancelExitStack OMPCancelStack;
1321 
1322  CodeGenPGO PGO;
1323 
1324  /// Calculate branch weights appropriate for PGO data
1325  llvm::MDNode *createProfileWeights(uint64_t TrueCount, uint64_t FalseCount);
1326  llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights);
1327  llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1328  uint64_t LoopCount);
1329 
1330 public:
1331  /// Increment the profiler's counter for the given statement by \p StepV.
1332  /// If \p StepV is null, the default increment is 1.
1333  void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1335  PGO.emitCounterIncrement(Builder, S, StepV);
1336  PGO.setCurrentStmt(S);
1337  }
1338 
1339  /// Get the profiler's count for the given statement.
1340  uint64_t getProfileCount(const Stmt *S) {
1341  Optional<uint64_t> Count = PGO.getStmtCount(S);
1342  if (!Count.hasValue())
1343  return 0;
1344  return *Count;
1345  }
1346 
1347  /// Set the profiler's current count.
1348  void setCurrentProfileCount(uint64_t Count) {
1349  PGO.setCurrentRegionCount(Count);
1350  }
1351 
1352  /// Get the profiler's current count. This is generally the count for the most
1353  /// recently incremented counter.
1355  return PGO.getCurrentRegionCount();
1356  }
1357 
1358 private:
1359 
1360  /// SwitchInsn - This is nearest current switch instruction. It is null if
1361  /// current context is not in a switch.
1362  llvm::SwitchInst *SwitchInsn = nullptr;
1363  /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1364  SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1365 
1366  /// CaseRangeBlock - This block holds if condition check for last case
1367  /// statement range in current switch instruction.
1368  llvm::BasicBlock *CaseRangeBlock = nullptr;
1369 
1370  /// OpaqueLValues - Keeps track of the current set of opaque value
1371  /// expressions.
1372  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1373  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1374 
1375  // VLASizeMap - This keeps track of the associated size for each VLA type.
1376  // We track this by the size expression rather than the type itself because
1377  // in certain situations, like a const qualifier applied to an VLA typedef,
1378  // multiple VLA types can share the same size expression.
1379  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1380  // enter/leave scopes.
1381  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1382 
1383  /// A block containing a single 'unreachable' instruction. Created
1384  /// lazily by getUnreachableBlock().
1385  llvm::BasicBlock *UnreachableBlock = nullptr;
1386 
1387  /// Counts of the number return expressions in the function.
1388  unsigned NumReturnExprs = 0;
1389 
1390  /// Count the number of simple (constant) return expressions in the function.
1391  unsigned NumSimpleReturnExprs = 0;
1392 
1393  /// The last regular (non-return) debug location (breakpoint) in the function.
1394  SourceLocation LastStopPoint;
1395 
1396 public:
1397  /// Source location information about the default argument or member
1398  /// initializer expression we're evaluating, if any.
1400  using SourceLocExprScopeGuard =
1402 
1403  /// A scope within which we are constructing the fields of an object which
1404  /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1405  /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1407  public:
1408  FieldConstructionScope(CodeGenFunction &CGF, Address This)
1409  : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1410  CGF.CXXDefaultInitExprThis = This;
1411  }
1413  CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1414  }
1415 
1416  private:
1417  CodeGenFunction &CGF;
1418  Address OldCXXDefaultInitExprThis;
1419  };
1420 
1421  /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1422  /// is overridden to be the object under construction.
1424  public:
1425  CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E)
1426  : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1427  OldCXXThisAlignment(CGF.CXXThisAlignment),
1428  SourceLocScope(E, CGF.CurSourceLocExprScope) {
1429  CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
1430  CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1431  }
1433  CGF.CXXThisValue = OldCXXThisValue;
1434  CGF.CXXThisAlignment = OldCXXThisAlignment;
1435  }
1436 
1437  public:
1438  CodeGenFunction &CGF;
1442  };
1443 
1445  CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
1446  : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {}
1447  };
1448 
1449  /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1450  /// current loop index is overridden.
1452  public:
1453  ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1454  : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1455  CGF.ArrayInitIndex = Index;
1456  }
1458  CGF.ArrayInitIndex = OldArrayInitIndex;
1459  }
1460 
1461  private:
1462  CodeGenFunction &CGF;
1463  llvm::Value *OldArrayInitIndex;
1464  };
1465 
1467  public:
1469  : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1470  OldCurCodeDecl(CGF.CurCodeDecl),
1471  OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1472  OldCXXABIThisValue(CGF.CXXABIThisValue),
1473  OldCXXThisValue(CGF.CXXThisValue),
1474  OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1475  OldCXXThisAlignment(CGF.CXXThisAlignment),
1476  OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1477  OldCXXInheritedCtorInitExprArgs(
1478  std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1479  CGF.CurGD = GD;
1480  CGF.CurFuncDecl = CGF.CurCodeDecl =
1481  cast<CXXConstructorDecl>(GD.getDecl());
1482  CGF.CXXABIThisDecl = nullptr;
1483  CGF.CXXABIThisValue = nullptr;
1484  CGF.CXXThisValue = nullptr;
1485  CGF.CXXABIThisAlignment = CharUnits();
1486  CGF.CXXThisAlignment = CharUnits();
1487  CGF.ReturnValue = Address::invalid();
1488  CGF.FnRetTy = QualType();
1489  CGF.CXXInheritedCtorInitExprArgs.clear();
1490  }
1492  CGF.CurGD = OldCurGD;
1493  CGF.CurFuncDecl = OldCurFuncDecl;
1494  CGF.CurCodeDecl = OldCurCodeDecl;
1495  CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1496  CGF.CXXABIThisValue = OldCXXABIThisValue;
1497  CGF.CXXThisValue = OldCXXThisValue;
1498  CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1499  CGF.CXXThisAlignment = OldCXXThisAlignment;
1500  CGF.ReturnValue = OldReturnValue;
1501  CGF.FnRetTy = OldFnRetTy;
1502  CGF.CXXInheritedCtorInitExprArgs =
1503  std::move(OldCXXInheritedCtorInitExprArgs);
1504  }
1505 
1506  private:
1507  CodeGenFunction &CGF;
1508  GlobalDecl OldCurGD;
1509  const Decl *OldCurFuncDecl;
1510  const Decl *OldCurCodeDecl;
1511  ImplicitParamDecl *OldCXXABIThisDecl;
1512  llvm::Value *OldCXXABIThisValue;
1513  llvm::Value *OldCXXThisValue;
1514  CharUnits OldCXXABIThisAlignment;
1515  CharUnits OldCXXThisAlignment;
1516  Address OldReturnValue;
1517  QualType OldFnRetTy;
1518  CallArgList OldCXXInheritedCtorInitExprArgs;
1519  };
1520 
1521 private:
1522  /// CXXThisDecl - When generating code for a C++ member function,
1523  /// this will hold the implicit 'this' declaration.
1524  ImplicitParamDecl *CXXABIThisDecl = nullptr;
1525  llvm::Value *CXXABIThisValue = nullptr;
1526  llvm::Value *CXXThisValue = nullptr;
1527  CharUnits CXXABIThisAlignment;
1528  CharUnits CXXThisAlignment;
1529 
1530  /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
1531  /// this expression.
1532  Address CXXDefaultInitExprThis = Address::invalid();
1533 
1534  /// The current array initialization index when evaluating an
1535  /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
1536  llvm::Value *ArrayInitIndex = nullptr;
1537 
1538  /// The values of function arguments to use when evaluating
1539  /// CXXInheritedCtorInitExprs within this context.
1540  CallArgList CXXInheritedCtorInitExprArgs;
1541 
1542  /// CXXStructorImplicitParamDecl - When generating code for a constructor or
1543  /// destructor, this will hold the implicit argument (e.g. VTT).
1544  ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
1545  llvm::Value *CXXStructorImplicitParamValue = nullptr;
1546 
1547  /// OutermostConditional - Points to the outermost active
1548  /// conditional control. This is used so that we know if a
1549  /// temporary should be destroyed conditionally.
1550  ConditionalEvaluation *OutermostConditional = nullptr;
1551 
1552  /// The current lexical scope.
1553  LexicalScope *CurLexicalScope = nullptr;
1554 
1555  /// The current source location that should be used for exception
1556  /// handling code.
1557  SourceLocation CurEHLocation;
1558 
1559  /// BlockByrefInfos - For each __block variable, contains
1560  /// information about the layout of the variable.
1561  llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
1562 
1563  /// Used by -fsanitize=nullability-return to determine whether the return
1564  /// value can be checked.
1565  llvm::Value *RetValNullabilityPrecondition = nullptr;
1566 
1567  /// Check if -fsanitize=nullability-return instrumentation is required for
1568  /// this function.
1569  bool requiresReturnValueNullabilityCheck() const {
1570  return RetValNullabilityPrecondition;
1571  }
1572 
1573  /// Used to store precise source locations for return statements by the
1574  /// runtime return value checks.
1575  Address ReturnLocation = Address::invalid();
1576 
1577  /// Check if the return value of this function requires sanitization.
1578  bool requiresReturnValueCheck() const {
1579  return requiresReturnValueNullabilityCheck() ||
1580  (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
1581  CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>());
1582  }
1583 
1584  llvm::BasicBlock *TerminateLandingPad = nullptr;
1585  llvm::BasicBlock *TerminateHandler = nullptr;
1586  llvm::BasicBlock *TrapBB = nullptr;
1587 
1588  /// Terminate funclets keyed by parent funclet pad.
1589  llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
1590 
1591  /// Largest vector width used in ths function. Will be used to create a
1592  /// function attribute.
1593  unsigned LargestVectorWidth = 0;
1594 
1595  /// True if we need emit the life-time markers.
1596  const bool ShouldEmitLifetimeMarkers;
1597 
1598  /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
1599  /// the function metadata.
1600  void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
1601  llvm::Function *Fn);
1602 
1603 public:
1604  CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
1605  ~CodeGenFunction();
1606 
1607  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1608  ASTContext &getContext() const { return CGM.getContext(); }
1610  if (DisableDebugInfo)
1611  return nullptr;
1612  return DebugInfo;
1613  }
1614  void disableDebugInfo() { DisableDebugInfo = true; }
1615  void enableDebugInfo() { DisableDebugInfo = false; }
1616 
1618  return CGM.getCodeGenOpts().OptimizationLevel == 0;
1619  }
1620 
1621  const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
1622 
1623  /// Returns a pointer to the function's exception object and selector slot,
1624  /// which is assigned in every landing pad.
1625  Address getExceptionSlot();
1626  Address getEHSelectorSlot();
1627 
1628  /// Returns the contents of the function's exception object and selector
1629  /// slots.
1630  llvm::Value *getExceptionFromSlot();
1631  llvm::Value *getSelectorFromSlot();
1632 
1633  Address getNormalCleanupDestSlot();
1634 
1635  llvm::BasicBlock *getUnreachableBlock() {
1636  if (!UnreachableBlock) {
1637  UnreachableBlock = createBasicBlock("unreachable");
1638  new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1639  }
1640  return UnreachableBlock;
1641  }
1642 
1643  llvm::BasicBlock *getInvokeDest() {
1644  if (!EHStack.requiresLandingPad()) return nullptr;
1645  return getInvokeDestImpl();
1646  }
1647 
1648  bool currentFunctionUsesSEHTry() const { return CurSEHParent != nullptr; }
1649 
1650  const TargetInfo &getTarget() const { return Target; }
1651  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1653  return CGM.getTargetCodeGenInfo();
1654  }
1655 
1656  //===--------------------------------------------------------------------===//
1657  // Cleanups
1658  //===--------------------------------------------------------------------===//
1659 
1660  typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
1661 
1662  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1663  Address arrayEndPointer,
1664  QualType elementType,
1665  CharUnits elementAlignment,
1666  Destroyer *destroyer);
1667  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1668  llvm::Value *arrayEnd,
1669  QualType elementType,
1670  CharUnits elementAlignment,
1671  Destroyer *destroyer);
1672 
1673  void pushDestroy(QualType::DestructionKind dtorKind,
1674  Address addr, QualType type);
1675  void pushEHDestroy(QualType::DestructionKind dtorKind,
1676  Address addr, QualType type);
1677  void pushDestroy(CleanupKind kind, Address addr, QualType type,
1678  Destroyer *destroyer, bool useEHCleanupForArray);
1679  void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
1680  QualType type, Destroyer *destroyer,
1681  bool useEHCleanupForArray);
1682  void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1683  llvm::Value *CompletePtr,
1684  QualType ElementType);
1685  void pushStackRestore(CleanupKind kind, Address SPMem);
1686  void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
1687  bool useEHCleanupForArray);
1688  llvm::Function *generateDestroyHelper(Address addr, QualType type,
1689  Destroyer *destroyer,
1690  bool useEHCleanupForArray,
1691  const VarDecl *VD);
1692  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1693  QualType elementType, CharUnits elementAlign,
1694  Destroyer *destroyer,
1695  bool checkZeroLength, bool useEHCleanup);
1696 
1697  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1698 
1699  /// Determines whether an EH cleanup is required to destroy a type
1700  /// with the given destruction kind.
1702  switch (kind) {
1703  case QualType::DK_none:
1704  return false;
1705  case QualType::DK_cxx_destructor:
1706  case QualType::DK_objc_weak_lifetime:
1707  case QualType::DK_nontrivial_c_struct:
1708  return getLangOpts().Exceptions;
1709  case QualType::DK_objc_strong_lifetime:
1710  return getLangOpts().Exceptions &&
1711  CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1712  }
1713  llvm_unreachable("bad destruction kind");
1714  }
1715 
1717  return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1718  }
1719 
1720  //===--------------------------------------------------------------------===//
1721  // Objective-C
1722  //===--------------------------------------------------------------------===//
1723 
1724  void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1725 
1726  void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
1727 
1728  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1729  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1730  const ObjCPropertyImplDecl *PID);
1731  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1732  const ObjCPropertyImplDecl *propImpl,
1733  const ObjCMethodDecl *GetterMothodDecl,
1734  llvm::Constant *AtomicHelperFn);
1735 
1736  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1737  ObjCMethodDecl *MD, bool ctor);
1738 
1739  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1740  /// for the given property.
1741  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1742  const ObjCPropertyImplDecl *PID);
1743  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1744  const ObjCPropertyImplDecl *propImpl,
1745  llvm::Constant *AtomicHelperFn);
1746 
1747  //===--------------------------------------------------------------------===//
1748  // Block Bits
1749  //===--------------------------------------------------------------------===//
1750 
1751  /// Emit block literal.
1752  /// \return an LLVM value which is a pointer to a struct which contains
1753  /// information about the block, including the block invoke function, the
1754  /// captured variables, etc.
1755  llvm::Value *EmitBlockLiteral(const BlockExpr *);
1756  static void destroyBlockInfos(CGBlockInfo *info);
1757 
1758  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1759  const CGBlockInfo &Info,
1760  const DeclMapTy &ldm,
1761  bool IsLambdaConversionToBlock,
1762  bool BuildGlobalBlock);
1763 
1764  /// Check if \p T is a C++ class that has a destructor that can throw.
1765  static bool cxxDestructorCanThrow(QualType T);
1766 
1767  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1768  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1769  llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1770  const ObjCPropertyImplDecl *PID);
1771  llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1772  const ObjCPropertyImplDecl *PID);
1773  llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1774 
1775  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
1776  bool CanThrow);
1777 
1778  class AutoVarEmission;
1779 
1780  void emitByrefStructureInit(const AutoVarEmission &emission);
1781 
1782  /// Enter a cleanup to destroy a __block variable. Note that this
1783  /// cleanup should be a no-op if the variable hasn't left the stack
1784  /// yet; if a cleanup is required for the variable itself, that needs
1785  /// to be done externally.
1786  ///
1787  /// \param Kind Cleanup kind.
1788  ///
1789  /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
1790  /// structure that will be passed to _Block_object_dispose. When
1791  /// \p LoadBlockVarAddr is true, the address of the field of the block
1792  /// structure that holds the address of the __block structure.
1793  ///
1794  /// \param Flags The flag that will be passed to _Block_object_dispose.
1795  ///
1796  /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
1797  /// \p Addr to get the address of the __block structure.
1798  void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
1799  bool LoadBlockVarAddr, bool CanThrow);
1800 
1801  void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
1802  llvm::Value *ptr);
1803 
1804  Address LoadBlockStruct();
1805  Address GetAddrOfBlockDecl(const VarDecl *var);
1806 
1807  /// BuildBlockByrefAddress - Computes the location of the
1808  /// data in a variable which is declared as __block.
1809  Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
1810  bool followForward = true);
1811  Address emitBlockByrefAddress(Address baseAddr,
1812  const BlockByrefInfo &info,
1813  bool followForward,
1814  const llvm::Twine &name);
1815 
1816  const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
1817 
1818  QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args);
1819 
1820  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1821  const CGFunctionInfo &FnInfo);
1822 
1823  /// Annotate the function with an attribute that disables TSan checking at
1824  /// runtime.
1825  void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
1826 
1827  /// Emit code for the start of a function.
1828  /// \param Loc The location to be associated with the function.
1829  /// \param StartLoc The location of the function body.
1830  void StartFunction(GlobalDecl GD,
1831  QualType RetTy,
1832  llvm::Function *Fn,
1833  const CGFunctionInfo &FnInfo,
1834  const FunctionArgList &Args,
1836  SourceLocation StartLoc = SourceLocation());
1837 
1838  static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);
1839 
1840  void EmitConstructorBody(FunctionArgList &Args);
1841  void EmitDestructorBody(FunctionArgList &Args);
1842  void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
1843  void EmitFunctionBody(const Stmt *Body);
1844  void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
1845 
1846  void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
1847  CallArgList &CallArgs);
1848  void EmitLambdaBlockInvokeBody();
1849  void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1850  void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
1852  EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
1853  }
1854  void EmitAsanPrologueOrEpilogue(bool Prologue);
1855 
1856  /// Emit the unified return block, trying to avoid its emission when
1857  /// possible.
1858  /// \return The debug location of the user written return statement if the
1859  /// return block is is avoided.
1860  llvm::DebugLoc EmitReturnBlock();
1861 
1862  /// FinishFunction - Complete IR generation of the current function. It is
1863  /// legal to call this function even if there is no current insertion point.
1864  void FinishFunction(SourceLocation EndLoc=SourceLocation());
1865 
1866  void StartThunk(llvm::Function *Fn, GlobalDecl GD,
1867  const CGFunctionInfo &FnInfo, bool IsUnprototyped);
1868 
1869  void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
1870  const ThunkInfo *Thunk, bool IsUnprototyped);
1871 
1872  void FinishThunk();
1873 
1874  /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
1875  void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
1876  llvm::FunctionCallee Callee);
1877 
1878  /// Generate a thunk for the given method.
1879  void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1880  GlobalDecl GD, const ThunkInfo &Thunk,
1881  bool IsUnprototyped);
1882 
1883  llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
1884  const CGFunctionInfo &FnInfo,
1885  GlobalDecl GD, const ThunkInfo &Thunk);
1886 
1887  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1888  FunctionArgList &Args);
1889 
1890  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
1891 
1892  /// Struct with all information about dynamic [sub]class needed to set vptr.
1893  struct VPtr {
1898  };
1899 
1900  /// Initialize the vtable pointer of the given subobject.
1901  void InitializeVTablePointer(const VPtr &vptr);
1902 
1904 
1905  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1906  VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);
1907 
1908  void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
1909  CharUnits OffsetFromNearestVBase,
1910  bool BaseIsNonVirtualPrimaryBase,
1911  const CXXRecordDecl *VTableClass,
1912  VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
1913 
1914  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1915 
1916  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1917  /// to by This.
1918  llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy,
1919  const CXXRecordDecl *VTableClass);
1920 
1929  };
1930 
1931  /// Derived is the presumed address of an object of type T after a
1932  /// cast. If T is a polymorphic class type, emit a check that the virtual
1933  /// table for Derived belongs to a class derived from T.
1934  void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
1935  bool MayBeNull, CFITypeCheckKind TCK,
1936  SourceLocation Loc);
1937 
1938  /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
1939  /// If vptr CFI is enabled, emit a check that VTable is valid.
1940  void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
1941  CFITypeCheckKind TCK, SourceLocation Loc);
1942 
1943  /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
1944  /// RD using llvm.type.test.
1945  void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
1946  CFITypeCheckKind TCK, SourceLocation Loc);
1947 
1948  /// If whole-program virtual table optimization is enabled, emit an assumption
1949  /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
1950  /// enabled, emit a check that VTable is a member of RD's type identifier.
1951  void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
1952  llvm::Value *VTable, SourceLocation Loc);
1953 
1954  /// Returns whether we should perform a type checked load when loading a
1955  /// virtual function for virtual calls to members of RD. This is generally
1956  /// true when both vcall CFI and whole-program-vtables are enabled.
1957  bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
1958 
1959  /// Emit a type checked load from the given vtable.
1960  llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable,
1961  uint64_t VTableByteOffset);
1962 
1963  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1964  /// given phase of destruction for a destructor. The end result
1965  /// should call destructors on members and base classes in reverse
1966  /// order of their construction.
1967  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1968 
1969  /// ShouldInstrumentFunction - Return true if the current function should be
1970  /// instrumented with __cyg_profile_func_* calls
1971  bool ShouldInstrumentFunction();
1972 
1973  /// ShouldXRayInstrument - Return true if the current function should be
1974  /// instrumented with XRay nop sleds.
1975  bool ShouldXRayInstrumentFunction() const;
1976 
1977  /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
1978  /// XRay custom event handling calls.
1979  bool AlwaysEmitXRayCustomEvents() const;
1980 
1981  /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
1982  /// XRay typed event handling calls.
1983  bool AlwaysEmitXRayTypedEvents() const;
1984 
1985  /// Encode an address into a form suitable for use in a function prologue.
1986  llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F,
1987  llvm::Constant *Addr);
1988 
1989  /// Decode an address used in a function prologue, encoded by \c
1990  /// EncodeAddrForUseInPrologue.
1991  llvm::Value *DecodeAddrUsedInPrologue(llvm::Value *F,
1992  llvm::Value *EncodedAddr);
1993 
1994  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1995  /// arguments for the given function. This is also responsible for naming the
1996  /// LLVM function arguments.
1997  void EmitFunctionProlog(const CGFunctionInfo &FI,
1998  llvm::Function *Fn,
1999  const FunctionArgList &Args);
2000 
2001  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
2002  /// given temporary.
2003  void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
2004  SourceLocation EndLoc);
2005 
2006  /// Emit a test that checks if the return value \p RV is nonnull.
2007  void EmitReturnValueCheck(llvm::Value *RV);
2008 
2009  /// EmitStartEHSpec - Emit the start of the exception spec.
2010  void EmitStartEHSpec(const Decl *D);
2011 
2012  /// EmitEndEHSpec - Emit the end of the exception spec.
2013  void EmitEndEHSpec(const Decl *D);
2014 
2015  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2016  llvm::BasicBlock *getTerminateLandingPad();
2017 
2018  /// getTerminateLandingPad - Return a cleanup funclet that just calls
2019  /// terminate.
2020  llvm::BasicBlock *getTerminateFunclet();
2021 
2022  /// getTerminateHandler - Return a handler (not a landing pad, just
2023  /// a catch handler) that just calls terminate. This is used when
2024  /// a terminate scope encloses a try.
2025  llvm::BasicBlock *getTerminateHandler();
2026 
2027  llvm::Type *ConvertTypeForMem(QualType T);
2028  llvm::Type *ConvertType(QualType T);
2029  llvm::Type *ConvertType(const TypeDecl *T) {
2030  return ConvertType(getContext().getTypeDeclType(T));
2031  }
2032 
2033  /// LoadObjCSelf - Load the value of self. This function is only valid while
2034  /// generating code for an Objective-C method.
2035  llvm::Value *LoadObjCSelf();
2036 
2037  /// TypeOfSelfObject - Return type of object that this self represents.
2038  QualType TypeOfSelfObject();
2039 
2040  /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2041  static TypeEvaluationKind getEvaluationKind(QualType T);
2042 
2044  return getEvaluationKind(T) == TEK_Scalar;
2045  }
2046 
2048  return getEvaluationKind(T) == TEK_Aggregate;
2049  }
2050 
2051  /// createBasicBlock - Create an LLVM basic block.
2052  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
2053  llvm::Function *parent = nullptr,
2054  llvm::BasicBlock *before = nullptr) {
2055  return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
2056  }
2057 
2058  /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
2059  /// label maps to.
2060  JumpDest getJumpDestForLabel(const LabelDecl *S);
2061 
2062  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2063  /// another basic block, simplify it. This assumes that no other code could
2064  /// potentially reference the basic block.
2065  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2066 
2067  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2068  /// adding a fall-through branch from the current insert block if
2069  /// necessary. It is legal to call this function even if there is no current
2070  /// insertion point.
2071  ///
2072  /// IsFinished - If true, indicates that the caller has finished emitting
2073  /// branches to the given block and does not expect to emit code into it. This
2074  /// means the block can be ignored if it is unreachable.
2075  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
2076 
2077  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2078  /// near its uses, and leave the insertion point in it.
2079  void EmitBlockAfterUses(llvm::BasicBlock *BB);
2080 
2081  /// EmitBranch - Emit a branch to the specified basic block from the current
2082  /// insert block, taking care to avoid creation of branches from dummy
2083  /// blocks. It is legal to call this function even if there is no current
2084  /// insertion point.
2085  ///
2086  /// This function clears the current insertion point. The caller should follow
2087  /// calls to this function with calls to Emit*Block prior to generation new
2088  /// code.
2089  void EmitBranch(llvm::BasicBlock *Block);
2090 
2091  /// HaveInsertPoint - True if an insertion point is defined. If not, this
2092  /// indicates that the current code being emitted is unreachable.
2093  bool HaveInsertPoint() const {
2094  return Builder.GetInsertBlock() != nullptr;
2095  }
2096 
2097  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2098  /// emitted IR has a place to go. Note that by definition, if this function
2099  /// creates a block then that block is unreachable; callers may do better to
2100  /// detect when no insertion point is defined and simply skip IR generation.
2102  if (!HaveInsertPoint())
2103  EmitBlock(createBasicBlock());
2104  }
2105 
2106  /// ErrorUnsupported - Print out an error that codegen doesn't support the
2107  /// specified stmt yet.
2108  void ErrorUnsupported(const Stmt *S, const char *Type);
2109 
2110  //===--------------------------------------------------------------------===//
2111  // Helpers
2112  //===--------------------------------------------------------------------===//
2113 
2116  return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2117  CGM.getTBAAAccessInfo(T));
2118  }
2119 
2121  TBAAAccessInfo TBAAInfo) {
2122  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2123  }
2124 
2127  return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
2128  LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
2129  }
2130 
2132  LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
2133  return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
2134  BaseInfo, TBAAInfo);
2135  }
2136 
2137  LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
2138  LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
2139  CharUnits getNaturalTypeAlignment(QualType T,
2140  LValueBaseInfo *BaseInfo = nullptr,
2141  TBAAAccessInfo *TBAAInfo = nullptr,
2142  bool forPointeeType = false);
2143  CharUnits getNaturalPointeeTypeAlignment(QualType T,
2144  LValueBaseInfo *BaseInfo = nullptr,
2145  TBAAAccessInfo *TBAAInfo = nullptr);
2146 
2147  Address EmitLoadOfReference(LValue RefLVal,
2148  LValueBaseInfo *PointeeBaseInfo = nullptr,
2149  TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2150  LValue EmitLoadOfReferenceLValue(LValue RefLVal);
2152  AlignmentSource Source =
2154  LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2155  CGM.getTBAAAccessInfo(RefTy));
2156  return EmitLoadOfReferenceLValue(RefLVal);
2157  }
2158 
2159  Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
2160  LValueBaseInfo *BaseInfo = nullptr,
2161  TBAAAccessInfo *TBAAInfo = nullptr);
2162  LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
2163 
2164  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2165  /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2166  /// insertion point of the builder. The caller is responsible for setting an
2167  /// appropriate alignment on
2168  /// the alloca.
2169  ///
2170  /// \p ArraySize is the number of array elements to be allocated if it
2171  /// is not nullptr.
2172  ///
2173  /// LangAS::Default is the address space of pointers to local variables and
2174  /// temporaries, as exposed in the source language. In certain
2175  /// configurations, this is not the same as the alloca address space, and a
2176  /// cast is needed to lift the pointer from the alloca AS into
2177  /// LangAS::Default. This can happen when the target uses a restricted
2178  /// address space for the stack but the source language requires
2179  /// LangAS::Default to be a generic address space. The latter condition is
2180  /// common for most programming languages; OpenCL is an exception in that
2181  /// LangAS::Default is the private address space, which naturally maps
2182  /// to the stack.
2183  ///
2184  /// Because the address of a temporary is often exposed to the program in
2185  /// various ways, this function will perform the cast. The original alloca
2186  /// instruction is returned through \p Alloca if it is not nullptr.
2187  ///
2188  /// The cast is not performaed in CreateTempAllocaWithoutCast. This is
2189  /// more efficient if the caller knows that the address will not be exposed.
2190  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2191  llvm::Value *ArraySize = nullptr);
2192  Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
2193  const Twine &Name = "tmp",
2194  llvm::Value *ArraySize = nullptr,
2195  Address *Alloca = nullptr);
2196  Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
2197  const Twine &Name = "tmp",
2198  llvm::Value *ArraySize = nullptr);
2199 
2200  /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2201  /// default ABI alignment of the given LLVM type.
2202  ///
2203  /// IMPORTANT NOTE: This is *not* generally the right alignment for
2204  /// any given AST type that happens to have been lowered to the
2205  /// given IR type. This should only ever be used for function-local,
2206  /// IR-driven manipulations like saving and restoring a value. Do
2207  /// not hand this address off to arbitrary IRGen routines, and especially
2208  /// do not pass it as an argument to a function that might expect a
2209  /// properly ABI-aligned value.
2210  Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
2211  const Twine &Name = "tmp");
2212 
2213  /// InitTempAlloca - Provide an initial value for the given alloca which
2214  /// will be observable at all locations in the function.
2215  ///
2216  /// The address should be something that was returned from one of
2217  /// the CreateTempAlloca or CreateMemTemp routines, and the
2218  /// initializer must be valid in the entry block (i.e. it must
2219  /// either be a constant or an argument value).
2220  void InitTempAlloca(Address Alloca, llvm::Value *Value);
2221 
2222  /// CreateIRTemp - Create a temporary IR object of the given type, with
2223  /// appropriate alignment. This routine should only be used when an temporary
2224  /// value needs to be stored into an alloca (for example, to avoid explicit
2225  /// PHI construction), but the type is the IR type, not the type appropriate
2226  /// for storing in memory.
2227  ///
2228  /// That is, this is exactly equivalent to CreateMemTemp, but calling
2229  /// ConvertType instead of ConvertTypeForMem.
2230  Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
2231 
2232  /// CreateMemTemp - Create a temporary memory object of the given type, with
2233  /// appropriate alignmen and cast it to the default address space. Returns
2234  /// the original alloca instruction by \p Alloca if it is not nullptr.
2235  Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
2236  Address *Alloca = nullptr);
2237  Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
2238  Address *Alloca = nullptr);
2239 
2240  /// CreateMemTemp - Create a temporary memory object of the given type, with
2241  /// appropriate alignmen without casting it to the default address space.
2242  Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2243  Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
2244  const Twine &Name = "tmp");
2245 
2246  /// CreateAggTemp - Create a temporary memory object for the given
2247  /// aggregate type.
2248  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
2249  return AggValueSlot::forAddr(CreateMemTemp(T, Name),
2250  T.getQualifiers(),
2251  AggValueSlot::IsNotDestructed,
2252  AggValueSlot::DoesNotNeedGCBarriers,
2253  AggValueSlot::IsNotAliased,
2254  AggValueSlot::DoesNotOverlap);
2255  }
2256 
2257  /// Emit a cast to void* in the appropriate address space.
2258  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
2259 
2260  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2261  /// expression and compare the result against zero, returning an Int1Ty value.
2262  llvm::Value *EvaluateExprAsBool(const Expr *E);
2263 
2264  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2265  void EmitIgnoredExpr(const Expr *E);
2266 
2267  /// EmitAnyExpr - Emit code to compute the specified expression which can have
2268  /// any type. The result is returned as an RValue struct. If this is an
2269  /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2270  /// the result should be returned.
2271  ///
2272  /// \param ignoreResult True if the resulting value isn't used.
2273  RValue EmitAnyExpr(const Expr *E,
2274  AggValueSlot aggSlot = AggValueSlot::ignored(),
2275  bool ignoreResult = false);
2276 
2277  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2278  // or the value of the expression, depending on how va_list is defined.
2279  Address EmitVAListRef(const Expr *E);
2280 
2281  /// Emit a "reference" to a __builtin_ms_va_list; this is
2282  /// always the value of the expression, because a __builtin_ms_va_list is a
2283  /// pointer to a char.
2284  Address EmitMSVAListRef(const Expr *E);
2285 
2286  /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
2287  /// always be accessible even if no aggregate location is provided.
2288  RValue EmitAnyExprToTemp(const Expr *E);
2289 
2290  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2291  /// arbitrary expression into the given memory location.
2292  void EmitAnyExprToMem(const Expr *E, Address Location,
2293  Qualifiers Quals, bool IsInitializer);
2294 
2295  void EmitAnyExprToExn(const Expr *E, Address Addr);
2296 
2297  /// EmitExprAsInit - Emits the code necessary to initialize a
2298  /// location in memory with the given initializer.
2299  void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2300  bool capturedByInit);
2301 
2302  /// hasVolatileMember - returns true if aggregate type has a volatile
2303  /// member.
2305  if (const RecordType *RT = T->getAs<RecordType>()) {
2306  const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
2307  return RD->hasVolatileMember();
2308  }
2309  return false;
2310  }
2311 
2312  /// Determine whether a return value slot may overlap some other object.
2314  // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2315  // class subobjects. These cases may need to be revisited depending on the
2316  // resolution of the relevant core issue.
2317  return AggValueSlot::DoesNotOverlap;
2318  }
2319 
2320  /// Determine whether a field initialization may overlap some other object.
2322  // FIXME: These cases can result in overlap as a result of P0840R0's
2323  // [[no_unique_address]] attribute. We can still infer NoOverlap in the
2324  // presence of that attribute if the field is within the nvsize of its
2325  // containing class, because non-virtual subobjects are initialized in
2326  // address order.
2327  return AggValueSlot::DoesNotOverlap;
2328  }
2329 
2330  /// Determine whether a base class initialization may overlap some other
2331  /// object.
2332  AggValueSlot::Overlap_t overlapForBaseInit(const CXXRecordDecl *RD,
2333  const CXXRecordDecl *BaseRD,
2334  bool IsVirtual);
2335 
2336  /// Emit an aggregate assignment.
2337  void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
2338  bool IsVolatile = hasVolatileMember(EltTy);
2339  EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
2340  }
2341 
2343  AggValueSlot::Overlap_t MayOverlap) {
2344  EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
2345  }
2346 
2347  /// EmitAggregateCopy - Emit an aggregate copy.
2348  ///
2349  /// \param isVolatile \c true iff either the source or the destination is
2350  /// volatile.
2351  /// \param MayOverlap Whether the tail padding of the destination might be
2352  /// occupied by some other object. More efficient code can often be
2353  /// generated if not.
2354  void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
2355  AggValueSlot::Overlap_t MayOverlap,
2356  bool isVolatile = false);
2357 
2358  /// GetAddrOfLocalVar - Return the address of a local variable.
2360  auto it = LocalDeclMap.find(VD);
2361  assert(it != LocalDeclMap.end() &&
2362  "Invalid argument to GetAddrOfLocalVar(), no decl!");
2363  return it->second;
2364  }
2365 
2366  /// Given an opaque value expression, return its LValue mapping if it exists,
2367  /// otherwise create one.
2368  LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
2369 
2370  /// Given an opaque value expression, return its RValue mapping if it exists,
2371  /// otherwise create one.
2372  RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
2373 
2374  /// Get the index of the current ArrayInitLoopExpr, if any.
2375  llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
2376 
2377  /// getAccessedFieldNo - Given an encoded value and a result number, return
2378  /// the input field number being accessed.
2379  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
2380 
2381  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
2382  llvm::BasicBlock *GetIndirectGotoBlock();
2383 
2384  /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
2385  static bool IsWrappedCXXThis(const Expr *E);
2386 
2387  /// EmitNullInitialization - Generate code to set a value of the given type to
2388  /// null, If the type contains data member pointers, they will be initialized
2389  /// to -1 in accordance with the Itanium C++ ABI.
2390  void EmitNullInitialization(Address DestPtr, QualType Ty);
2391 
2392  /// Emits a call to an LLVM variable-argument intrinsic, either
2393  /// \c llvm.va_start or \c llvm.va_end.
2394  /// \param ArgValue A reference to the \c va_list as emitted by either
2395  /// \c EmitVAListRef or \c EmitMSVAListRef.
2396  /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
2397  /// calls \c llvm.va_end.
2398  llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
2399 
2400  /// Generate code to get an argument from the passed in pointer
2401  /// and update it accordingly.
2402  /// \param VE The \c VAArgExpr for which to generate code.
2403  /// \param VAListAddr Receives a reference to the \c va_list as emitted by
2404  /// either \c EmitVAListRef or \c EmitMSVAListRef.
2405  /// \returns A pointer to the argument.
2406  // FIXME: We should be able to get rid of this method and use the va_arg
2407  // instruction in LLVM instead once it works well enough.
2408  Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr);
2409 
2410  /// emitArrayLength - Compute the length of an array, even if it's a
2411  /// VLA, and drill down to the base element type.
2412  llvm::Value *emitArrayLength(const ArrayType *arrayType,
2413  QualType &baseType,
2414  Address &addr);
2415 
2416  /// EmitVLASize - Capture all the sizes for the VLA expressions in
2417  /// the given variably-modified type and store them in the VLASizeMap.
2418  ///
2419  /// This function can be called with a null (unreachable) insert point.
2420  void EmitVariablyModifiedType(QualType Ty);
2421 
2422  struct VlaSizePair {
2425 
2426  VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
2427  };
2428 
2429  /// Return the number of elements for a single dimension
2430  /// for the given array type.
2431  VlaSizePair getVLAElements1D(const VariableArrayType *vla);
2432  VlaSizePair getVLAElements1D(QualType vla);
2433 
2434  /// Returns an LLVM value that corresponds to the size,
2435  /// in non-variably-sized elements, of a variable length array type,
2436  /// plus that largest non-variably-sized element type. Assumes that
2437  /// the type has already been emitted with EmitVariablyModifiedType.
2438  VlaSizePair getVLASize(const VariableArrayType *vla);
2439  VlaSizePair getVLASize(QualType vla);
2440 
2441  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
2442  /// generating code for an C++ member function.
2444  assert(CXXThisValue && "no 'this' value for this function");
2445  return CXXThisValue;
2446  }
2447  Address LoadCXXThisAddress();
2448 
2449  /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors have
2450  /// virtual bases.
2451  // FIXME: Every place that calls LoadCXXVTT is something
2452  // that needs to be abstracted properly.
2454  assert(CXXStructorImplicitParamValue && "no VTT value for this function");
2455  return CXXStructorImplicitParamValue;
2456  }
2457 
2458  /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
2459  /// complete class to the given direct base.
2460  Address
2461  GetAddressOfDirectBaseInCompleteClass(Address Value,
2462  const CXXRecordDecl *Derived,
2463  const CXXRecordDecl *Base,
2464  bool BaseIsVirtual);
2465 
2466  static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
2467 
2468  /// GetAddressOfBaseClass - This function will add the necessary delta to the
2469  /// load of 'this' and returns address of the base class.
2470  Address GetAddressOfBaseClass(Address Value,
2471  const CXXRecordDecl *Derived,
2474  bool NullCheckValue, SourceLocation Loc);
2475 
2476  Address GetAddressOfDerivedClass(Address Value,
2477  const CXXRecordDecl *Derived,
2480  bool NullCheckValue);
2481 
2482  /// GetVTTParameter - Return the VTT parameter that should be passed to a
2483  /// base constructor/destructor with virtual bases.
2484  /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
2485  /// to ItaniumCXXABI.cpp together with all the references to VTT.
2486  llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
2487  bool Delegating);
2488 
2489  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
2490  CXXCtorType CtorType,
2491  const FunctionArgList &Args,
2492  SourceLocation Loc);
2493  // It's important not to confuse this and the previous function. Delegating
2494  // constructors are the C++0x feature. The constructor delegate optimization
2495  // is used to reduce duplication in the base and complete consturctors where
2496  // they are substantially the same.
2497  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
2498  const FunctionArgList &Args);
2499 
2500  /// Emit a call to an inheriting constructor (that is, one that invokes a
2501  /// constructor inherited from a base class) by inlining its definition. This
2502  /// is necessary if the ABI does not support forwarding the arguments to the
2503  /// base class constructor (because they're variadic or similar).
2504  void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor,
2505  CXXCtorType CtorType,
2506  bool ForVirtualBase,
2507  bool Delegating,
2508  CallArgList &Args);
2509 
2510  /// Emit a call to a constructor inherited from a base class, passing the
2511  /// current constructor's arguments along unmodified (without even making
2512  /// a copy).
2513  void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D,
2514  bool ForVirtualBase, Address This,
2515  bool InheritedFromVBase,
2516  const CXXInheritedCtorInitExpr *E);
2517 
2518  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
2519  bool ForVirtualBase, bool Delegating,
2520  AggValueSlot ThisAVS, const CXXConstructExpr *E);
2521 
2522  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
2523  bool ForVirtualBase, bool Delegating,
2524  Address This, CallArgList &Args,
2525  AggValueSlot::Overlap_t Overlap,
2526  SourceLocation Loc, bool NewPointerIsChecked);
2527 
2528  /// Emit assumption load for all bases. Requires to be be called only on
2529  /// most-derived class and not under construction of the object.
2530  void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
2531 
2532  /// Emit assumption that vptr load == global vtable.
2533  void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
2534 
2535  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
2536  Address This, Address Src,
2537  const CXXConstructExpr *E);
2538 
2539  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
2540  const ArrayType *ArrayTy,
2541  Address ArrayPtr,
2542  const CXXConstructExpr *E,
2543  bool NewPointerIsChecked,
2544  bool ZeroInitialization = false);
2545 
2546  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
2547  llvm::Value *NumElements,
2548  Address ArrayPtr,
2549  const CXXConstructExpr *E,
2550  bool NewPointerIsChecked,
2551  bool ZeroInitialization = false);
2552 
2553  static Destroyer destroyCXXObject;
2554 
2555  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
2556  bool ForVirtualBase, bool Delegating,
2557  Address This);
2558 
2559  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
2560  llvm::Type *ElementTy, Address NewPtr,
2561  llvm::Value *NumElements,
2562  llvm::Value *AllocSizeWithoutCookie);
2563 
2564  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
2565  Address Ptr);
2566 
2567  llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
2568  void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
2569 
2570  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
2571  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
2572 
2573  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
2574  QualType DeleteTy, llvm::Value *NumElements = nullptr,
2575  CharUnits CookieSize = CharUnits());
2576 
2577  RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
2578  const CallExpr *TheCallExpr, bool IsDelete);
2579 
2580  llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
2581  llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
2582  Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
2583 
2584  /// Situations in which we might emit a check for the suitability of a
2585  /// pointer or glvalue.
2587  /// Checking the operand of a load. Must be suitably sized and aligned.
2589  /// Checking the destination of a store. Must be suitably sized and aligned.
2591  /// Checking the bound value in a reference binding. Must be suitably sized
2592  /// and aligned, but is not required to refer to an object (until the
2593  /// reference is used), per core issue 453.
2595  /// Checking the object expression in a non-static data member access. Must
2596  /// be an object within its lifetime.
2598  /// Checking the 'this' pointer for a call to a non-static member function.
2599  /// Must be an object within its lifetime.
2601  /// Checking the 'this' pointer for a constructor call.
2603  /// Checking the operand of a static_cast to a derived pointer type. Must be
2604  /// null or an object within its lifetime.
2606  /// Checking the operand of a static_cast to a derived reference type. Must
2607  /// be an object within its lifetime.
2609  /// Checking the operand of a cast to a base object. Must be suitably sized
2610  /// and aligned.
2612  /// Checking the operand of a cast to a virtual base object. Must be an
2613  /// object within its lifetime.
2615  /// Checking the value assigned to a _Nonnull pointer. Must not be null.
2617  /// Checking the operand of a dynamic_cast or a typeid expression. Must be
2618  /// null or an object within its lifetime.
2619  TCK_DynamicOperation
2620  };
2621 
2622  /// Determine whether the pointer type check \p TCK permits null pointers.
2623  static bool isNullPointerAllowed(TypeCheckKind TCK);
2624 
2625  /// Determine whether the pointer type check \p TCK requires a vptr check.
2626  static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);
2627 
2628  /// Whether any type-checking sanitizers are enabled. If \c false,
2629  /// calls to EmitTypeCheck can be skipped.
2630  bool sanitizePerformTypeCheck() const;
2631 
2632  /// Emit a check that \p V is the address of storage of the
2633  /// appropriate size and alignment for an object of type \p Type
2634  /// (or if ArraySize is provided, for an array of that bound).
2635  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
2636  QualType Type, CharUnits Alignment = CharUnits::Zero(),
2637  SanitizerSet SkippedChecks = SanitizerSet(),
2638  llvm::Value *ArraySize = nullptr);
2639 
2640  /// Emit a check that \p Base points into an array object, which
2641  /// we can access at index \p Index. \p Accessed should be \c false if we
2642  /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
2643  void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
2644  QualType IndexType, bool Accessed);
2645 
2646  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2647  bool isInc, bool isPre);
2648  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
2649  bool isInc, bool isPre);
2650 
2651  /// Converts Location to a DebugLoc, if debug information is enabled.
2652  llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
2653 
2654 
2655  //===--------------------------------------------------------------------===//
2656  // Declaration Emission
2657  //===--------------------------------------------------------------------===//
2658 
2659  /// EmitDecl - Emit a declaration.
2660  ///
2661  /// This function can be called with a null (unreachable) insert point.
2662  void EmitDecl(const Decl &D);
2663 
2664  /// EmitVarDecl - Emit a local variable declaration.
2665  ///
2666  /// This function can be called with a null (unreachable) insert point.
2667  void EmitVarDecl(const VarDecl &D);
2668 
2669  void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2670  bool capturedByInit);
2671 
2672  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
2673  llvm::Value *Address);
2674 
2675  /// Determine whether the given initializer is trivial in the sense
2676  /// that it requires no code to be generated.
2677  bool isTrivialInitializer(const Expr *Init);
2678 
2679  /// EmitAutoVarDecl - Emit an auto variable declaration.
2680  ///
2681  /// This function can be called with a null (unreachable) insert point.
2682  void EmitAutoVarDecl(const VarDecl &D);
2683 
2685  friend class CodeGenFunction;
2686 
2687  const VarDecl *Variable;
2688 
2689  /// The address of the alloca for languages with explicit address space
2690  /// (e.g. OpenCL) or alloca casted to generic pointer for address space
2691  /// agnostic languages (e.g. C++). Invalid if the variable was emitted
2692  /// as a global constant.
2693  Address Addr;
2694 
2695  llvm::Value *NRVOFlag;
2696 
2697  /// True if the variable is a __block variable that is captured by an
2698  /// escaping block.
2699  bool IsEscapingByRef;
2700 
2701  /// True if the variable is of aggregate type and has a constant
2702  /// initializer.
2703  bool IsConstantAggregate;
2704 
2705  /// Non-null if we should use lifetime annotations.
2706  llvm::Value *SizeForLifetimeMarkers;
2707 
2708  /// Address with original alloca instruction. Invalid if the variable was
2709  /// emitted as a global constant.
2710  Address AllocaAddr;
2711 
2712  struct Invalid {};
2713  AutoVarEmission(Invalid)
2714  : Variable(nullptr), Addr(Address::invalid()),
2715  AllocaAddr(Address::invalid()) {}
2716 
2717  AutoVarEmission(const VarDecl &variable)
2718  : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
2719  IsEscapingByRef(false), IsConstantAggregate(false),
2720  SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
2721 
2722  bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
2723 
2724  public:
2725  static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
2726 
2727  bool useLifetimeMarkers() const {
2728  return SizeForLifetimeMarkers != nullptr;
2729  }
2731  assert(useLifetimeMarkers());
2732  return SizeForLifetimeMarkers;
2733  }
2734 
2735  /// Returns the raw, allocated address, which is not necessarily
2736  /// the address of the object itself. It is casted to default
2737  /// address space for address space agnostic languages.
2739  return Addr;
2740  }
2741 
2742  /// Returns the address for the original alloca instruction.
2743  Address getOriginalAllocatedAddress() const { return AllocaAddr; }
2744 
2745  /// Returns the address of the object within this declaration.
2746  /// Note that this does not chase the forwarding pointer for
2747  /// __block decls.
2748  Address getObjectAddress(CodeGenFunction &CGF) const {
2749  if (!IsEscapingByRef) return Addr;
2750 
2751  return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
2752  }
2753  };
2754  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
2755  void EmitAutoVarInit(const AutoVarEmission &emission);
2756  void EmitAutoVarCleanups(const AutoVarEmission &emission);
2757  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
2758  QualType::DestructionKind dtorKind);
2759 
2760  /// Emits the alloca and debug information for the size expressions for each
2761  /// dimension of an array. It registers the association of its (1-dimensional)
2762  /// QualTypes and size expression's debug node, so that CGDebugInfo can
2763  /// reference this node when creating the DISubrange object to describe the
2764  /// array types.
2765  void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI,
2766  const VarDecl &D,
2767  bool EmitDebugInfo);
2768 
2769  void EmitStaticVarDecl(const VarDecl &D,
2770  llvm::GlobalValue::LinkageTypes Linkage);
2771 
2772  class ParamValue {
2773  llvm::Value *Value;
2774  unsigned Alignment;
2775  ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
2776  public:
2778  return ParamValue(value, 0);
2779  }
2781  assert(!addr.getAlignment().isZero());
2782  return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
2783  }
2784 
2785  bool isIndirect() const { return Alignment != 0; }
2786  llvm::Value *getAnyValue() const { return Value; }
2787 
2789  assert(!isIndirect());
2790  return Value;
2791  }
2792 
2794  assert(isIndirect());
2795  return Address(Value, CharUnits::fromQuantity(Alignment));
2796  }
2797  };
2798 
2799  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
2800  void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
2801 
2802  /// protectFromPeepholes - Protect a value that we're intending to
2803  /// store to the side, but which will probably be used later, from
2804  /// aggressive peepholing optimizations that might delete it.
2805  ///
2806  /// Pass the result to unprotectFromPeepholes to declare that
2807  /// protection is no longer required.
2808  ///
2809  /// There's no particular reason why this shouldn't apply to
2810  /// l-values, it's just that no existing peepholes work on pointers.
2811  PeepholeProtection protectFromPeepholes(RValue rvalue);
2812  void unprotectFromPeepholes(PeepholeProtection protection);
2813 
2814  void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
2815  SourceLocation Loc,
2816  SourceLocation AssumptionLoc,
2817  llvm::Value *Alignment,
2818  llvm::Value *OffsetValue,
2819  llvm::Value *TheCheck,
2820  llvm::Instruction *Assumption);
2821 
2822  void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
2823  SourceLocation Loc, SourceLocation AssumptionLoc,
2824  llvm::Value *Alignment,
2825  llvm::Value *OffsetValue = nullptr);
2826 
2827  void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
2828  SourceLocation Loc, SourceLocation AssumptionLoc,
2829  unsigned Alignment,
2830  llvm::Value *OffsetValue = nullptr);
2831 
2832  void EmitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
2833  SourceLocation AssumptionLoc, unsigned Alignment,
2834  llvm::Value *OffsetValue = nullptr);
2835 
2836  //===--------------------------------------------------------------------===//
2837  // Statement Emission
2838  //===--------------------------------------------------------------------===//
2839 
2840  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
2841  void EmitStopPoint(const Stmt *S);
2842 
2843  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
2844  /// this function even if there is no current insertion point.
2845  ///
2846  /// This function may clear the current insertion point; callers should use
2847  /// EnsureInsertPoint if they wish to subsequently generate code without first
2848  /// calling EmitBlock, EmitBranch, or EmitStmt.
2849  void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = None);
2850 
2851  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
2852  /// necessarily require an insertion point or debug information; typically
2853  /// because the statement amounts to a jump or a container of other
2854  /// statements.
2855  ///
2856  /// \return True if the statement was handled.
2857  bool EmitSimpleStmt(const Stmt *S);
2858 
2859  Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
2860  AggValueSlot AVS = AggValueSlot::ignored());
2861  Address EmitCompoundStmtWithoutScope(const CompoundStmt &S,
2862  bool GetLast = false,
2863  AggValueSlot AVS =
2864  AggValueSlot::ignored());
2865 
2866  /// EmitLabel - Emit the block for the given label. It is legal to call this
2867  /// function even if there is no current insertion point.
2868  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
2869 
2870  void EmitLabelStmt(const LabelStmt &S);
2871  void EmitAttributedStmt(const AttributedStmt &S);
2872  void EmitGotoStmt(const GotoStmt &S);
2873  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
2874  void EmitIfStmt(const IfStmt &S);
2875 
2876  void EmitWhileStmt(const WhileStmt &S,
2877  ArrayRef<const Attr *> Attrs = None);
2878  void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
2879  void EmitForStmt(const ForStmt &S,
2880  ArrayRef<const Attr *> Attrs = None);
2881  void EmitReturnStmt(const ReturnStmt &S);
2882  void EmitDeclStmt(const DeclStmt &S);
2883  void EmitBreakStmt(const BreakStmt &S);
2884  void EmitContinueStmt(const ContinueStmt &S);
2885  void EmitSwitchStmt(const SwitchStmt &S);
2886  void EmitDefaultStmt(const DefaultStmt &S);
2887  void EmitCaseStmt(const CaseStmt &S);
2888  void EmitCaseStmtRange(const CaseStmt &S);
2889  void EmitAsmStmt(const AsmStmt &S);
2890 
2891  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
2892  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
2893  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
2894  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
2895  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
2896 
2897  void EmitCoroutineBody(const CoroutineBodyStmt &S);
2898  void EmitCoreturnStmt(const CoreturnStmt &S);
2899  RValue EmitCoawaitExpr(const CoawaitExpr &E,
2900  AggValueSlot aggSlot = AggValueSlot::ignored(),
2901  bool ignoreResult = false);
2902  LValue EmitCoawaitLValue(const CoawaitExpr *E);
2903  RValue EmitCoyieldExpr(const CoyieldExpr &E,
2904  AggValueSlot aggSlot = AggValueSlot::ignored(),
2905  bool ignoreResult = false);
2906  LValue EmitCoyieldLValue(const CoyieldExpr *E);
2907  RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
2908 
2909  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2910  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2911 
2912  void EmitCXXTryStmt(const CXXTryStmt &S);
2913  void EmitSEHTryStmt(const SEHTryStmt &S);
2914  void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
2915  void EnterSEHTryStmt(const SEHTryStmt &S);
2916  void ExitSEHTryStmt(const SEHTryStmt &S);
2917 
2918  void pushSEHCleanup(CleanupKind kind,
2919  llvm::Function *FinallyFunc);
2920  void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
2921  const Stmt *OutlinedStmt);
2922 
2923  llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
2924  const SEHExceptStmt &Except);
2925 
2926  llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
2927  const SEHFinallyStmt &Finally);
2928 
2929  void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
2930  llvm::Value *ParentFP,
2931  llvm::Value *EntryEBP);
2932  llvm::Value *EmitSEHExceptionCode();
2933  llvm::Value *EmitSEHExceptionInfo();
2934  llvm::Value *EmitSEHAbnormalTermination();
2935 
2936  /// Emit simple code for OpenMP directives in Simd-only mode.
2937  void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
2938 
2939  /// Scan the outlined statement for captures from the parent function. For
2940  /// each capture, mark the capture as escaped and emit a call to
2941  /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
2942  void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
2943  bool IsFilter);
2944 
2945  /// Recovers the address of a local in a parent function. ParentVar is the
2946  /// address of the variable used in the immediate parent function. It can
2947  /// either be an alloca or a call to llvm.localrecover if there are nested
2948  /// outlined functions. ParentFP is the frame pointer of the outermost parent
2949  /// frame.
2950  Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
2951  Address ParentVar,
2952  llvm::Value *ParentFP);
2953 
2954  void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
2955  ArrayRef<const Attr *> Attrs = None);
2956 
2957  /// Controls insertion of cancellation exit blocks in worksharing constructs.
2959  CodeGenFunction &CGF;
2960 
2961  public:
2962  OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
2963  bool HasCancel)
2964  : CGF(CGF) {
2965  CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
2966  }
2967  ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
2968  };
2969 
2970  /// Returns calculated size of the specified type.
2971  llvm::Value *getTypeSize(QualType Ty);
2972  LValue InitCapturedStruct(const CapturedStmt &S);
2973  llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
2974  llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
2975  Address GenerateCapturedStmtArgument(const CapturedStmt &S);
2976  llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S);
2977  void GenerateOpenMPCapturedVars(const CapturedStmt &S,
2978  SmallVectorImpl<llvm::Value *> &CapturedVars);
2979  void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
2980  SourceLocation Loc);
2981  /// Perform element by element copying of arrays with type \a
2982  /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
2983  /// generated by \a CopyGen.
2984  ///
2985  /// \param DestAddr Address of the destination array.
2986  /// \param SrcAddr Address of the source array.
2987  /// \param OriginalType Type of destination and source arrays.
2988  /// \param CopyGen Copying procedure that copies value of single array element
2989  /// to another single array element.
2990  void EmitOMPAggregateAssign(
2991  Address DestAddr, Address SrcAddr, QualType OriginalType,
2992  const llvm::function_ref<void(Address, Address)> CopyGen);
2993  /// Emit proper copying of data from one variable to another.
2994  ///
2995  /// \param OriginalType Original type of the copied variables.
2996  /// \param DestAddr Destination address.
2997  /// \param SrcAddr Source address.
2998  /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
2999  /// type of the base array element).
3000  /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
3001  /// the base array element).
3002  /// \param Copy Actual copygin expression for copying data from \a SrcVD to \a
3003  /// DestVD.
3004  void EmitOMPCopy(QualType OriginalType,
3005  Address DestAddr, Address SrcAddr,
3006  const VarDecl *DestVD, const VarDecl *SrcVD,
3007  const Expr *Copy);
3008  /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
3009  /// \a X = \a E \a BO \a E.
3010  ///
3011  /// \param X Value to be updated.
3012  /// \param E Update value.
3013  /// \param BO Binary operation for update operation.
3014  /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3015  /// expression, false otherwise.
3016  /// \param AO Atomic ordering of the generated atomic instructions.
3017  /// \param CommonGen Code generator for complex expressions that cannot be
3018  /// expressed through atomicrmw instruction.
3019  /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
3020  /// generated, <false, RValue::get(nullptr)> otherwise.
3021  std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3022  LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3023  llvm::AtomicOrdering AO, SourceLocation Loc,
3024  const llvm::function_ref<RValue(RValue)> CommonGen);
3025  bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
3026  OMPPrivateScope &PrivateScope);
3027  void EmitOMPPrivateClause(const OMPExecutableDirective &D,
3028  OMPPrivateScope &PrivateScope);
3029  void EmitOMPUseDevicePtrClause(
3030  const OMPClause &C, OMPPrivateScope &PrivateScope,
3031  const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
3032  /// Emit code for copyin clause in \a D directive. The next code is
3033  /// generated at the start of outlined functions for directives:
3034  /// \code
3035  /// threadprivate_var1 = master_threadprivate_var1;
3036  /// operator=(threadprivate_var2, master_threadprivate_var2);
3037  /// ...
3038  /// __kmpc_barrier(&loc, global_tid);
3039  /// \endcode
3040  ///
3041  /// \param D OpenMP directive possibly with 'copyin' clause(s).
3042  /// \returns true if at least one copyin variable is found, false otherwise.
3043  bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
3044  /// Emit initial code for lastprivate variables. If some variable is
3045  /// not also firstprivate, then the default initialization is used. Otherwise
3046  /// initialization of this variable is performed by EmitOMPFirstprivateClause
3047  /// method.
3048  ///
3049  /// \param D Directive that may have 'lastprivate' directives.
3050  /// \param PrivateScope Private scope for capturing lastprivate variables for
3051  /// proper codegen in internal captured statement.
3052  ///
3053  /// \returns true if there is at least one lastprivate variable, false
3054  /// otherwise.
3055  bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
3056  OMPPrivateScope &PrivateScope);
3057  /// Emit final copying of lastprivate values to original variables at
3058  /// the end of the worksharing or simd directive.
3059  ///
3060  /// \param D Directive that has at least one 'lastprivate' directives.
3061  /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3062  /// it is the last iteration of the loop code in associated directive, or to
3063  /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3064  void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
3065  bool NoFinals,
3066  llvm::Value *IsLastIterCond = nullptr);
3067  /// Emit initial code for linear clauses.
3068  void EmitOMPLinearClause(const OMPLoopDirective &D,
3069  CodeGenFunction::OMPPrivateScope &PrivateScope);
3070  /// Emit final code for linear clauses.
3071  /// \param CondGen Optional conditional code for final part of codegen for
3072  /// linear clause.
3073  void EmitOMPLinearClauseFinal(
3074  const OMPLoopDirective &D,
3075  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3076  /// Emit initial code for reduction variables. Creates reduction copies
3077  /// and initializes them with the values according to OpenMP standard.
3078  ///
3079  /// \param D Directive (possibly) with the 'reduction' clause.
3080  /// \param PrivateScope Private scope for capturing reduction variables for
3081  /// proper codegen in internal captured statement.
3082  ///
3083  void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
3084  OMPPrivateScope &PrivateScope);
3085  /// Emit final update of reduction values to original variables at
3086  /// the end of the directive.
3087  ///
3088  /// \param D Directive that has at least one 'reduction' directives.
3089  /// \param ReductionKind The kind of reduction to perform.
3090  void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
3091  const OpenMPDirectiveKind ReductionKind);
3092  /// Emit initial code for linear variables. Creates private copies
3093  /// and initializes them with the values according to OpenMP standard.
3094  ///
3095  /// \param D Directive (possibly) with the 'linear' clause.
3096  /// \return true if at least one linear variable is found that should be
3097  /// initialized with the value of the original variable, false otherwise.
3098  bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
3099 
3100  typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3101  llvm::Function * /*OutlinedFn*/,
3102  const OMPTaskDataTy & /*Data*/)>
3104  void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
3105  const OpenMPDirectiveKind CapturedRegion,
3106  const RegionCodeGenTy &BodyGen,
3107  const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3109  Address BasePointersArray = Address::invalid();
3110  Address PointersArray = Address::invalid();
3111  Address SizesArray = Address::invalid();
3112  unsigned NumberOfTargetItems = 0;
3113  explicit OMPTargetDataInfo() = default;
3114  OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
3115  Address SizesArray, unsigned NumberOfTargetItems)
3116  : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
3117  SizesArray(SizesArray), NumberOfTargetItems(NumberOfTargetItems) {}
3118  };
3119  void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
3120  const RegionCodeGenTy &BodyGen,
3121  OMPTargetDataInfo &InputInfo);
3122 
3123  void EmitOMPParallelDirective(const OMPParallelDirective &S);
3124  void EmitOMPSimdDirective(const OMPSimdDirective &S);
3125  void EmitOMPForDirective(const OMPForDirective &S);
3126  void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
3127  void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
3128  void EmitOMPSectionDirective(const OMPSectionDirective &S);
3129  void EmitOMPSingleDirective(const OMPSingleDirective &S);
3130  void EmitOMPMasterDirective(const OMPMasterDirective &S);
3131  void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
3132  void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
3133  void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
3134  void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
3135  void EmitOMPTaskDirective(const OMPTaskDirective &S);
3136  void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
3137  void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
3138  void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
3139  void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
3140  void EmitOMPFlushDirective(const OMPFlushDirective &S);
3141  void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
3142  void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
3143  void EmitOMPTargetDirective(const OMPTargetDirective &S);
3144  void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
3145  void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S);
3146  void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S);
3147  void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S);
3148  void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S);
3149  void
3150  EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S);
3151  void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
3152  void
3153  EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
3154  void EmitOMPCancelDirective(const OMPCancelDirective &S);
3155  void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S);
3156  void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
3157  void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
3158  void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
3159  void EmitOMPDistributeParallelForDirective(
3161  void EmitOMPDistributeParallelForSimdDirective(
3163  void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
3164  void EmitOMPTargetParallelForSimdDirective(
3166  void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
3167  void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
3168  void
3169  EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
3170  void EmitOMPTeamsDistributeParallelForSimdDirective(
3172  void EmitOMPTeamsDistributeParallelForDirective(
3174  void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
3175  void EmitOMPTargetTeamsDistributeDirective(
3177  void EmitOMPTargetTeamsDistributeParallelForDirective(
3179  void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
3181  void EmitOMPTargetTeamsDistributeSimdDirective(
3183 
3184  /// Emit device code for the target directive.
3185  static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
3186  StringRef ParentName,
3187  const OMPTargetDirective &S);
3188  static void
3189  EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3190  const OMPTargetParallelDirective &S);
3191  /// Emit device code for the target parallel for directive.
3192  static void EmitOMPTargetParallelForDeviceFunction(
3193  CodeGenModule &CGM, StringRef ParentName,
3195  /// Emit device code for the target parallel for simd directive.
3196  static void EmitOMPTargetParallelForSimdDeviceFunction(
3197  CodeGenModule &CGM, StringRef ParentName,
3199  /// Emit device code for the target teams directive.
3200  static void
3201  EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3202  const OMPTargetTeamsDirective &S);
3203  /// Emit device code for the target teams distribute directive.
3204  static void EmitOMPTargetTeamsDistributeDeviceFunction(
3205  CodeGenModule &CGM, StringRef ParentName,
3207  /// Emit device code for the target teams distribute simd directive.
3208  static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
3209  CodeGenModule &CGM, StringRef ParentName,
3211  /// Emit device code for the target simd directive.
3212  static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
3213  StringRef ParentName,
3214  const OMPTargetSimdDirective &S);
3215  /// Emit device code for the target teams distribute parallel for simd
3216  /// directive.
3217  static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
3218  CodeGenModule &CGM, StringRef ParentName,
3220 
3221  static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
3222  CodeGenModule &CGM, StringRef ParentName,
3224  /// Emit inner loop of the worksharing/simd construct.
3225  ///
3226  /// \param S Directive, for which the inner loop must be emitted.
3227  /// \param RequiresCleanup true, if directive has some associated private
3228  /// variables.
3229  /// \param LoopCond Bollean condition for loop continuation.
3230  /// \param IncExpr Increment expression for loop control variable.
3231  /// \param BodyGen Generator for the inner body of the inner loop.
3232  /// \param PostIncGen Genrator for post-increment code (required for ordered
3233  /// loop directvies).
3234  void EmitOMPInnerLoop(
3235  const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
3236  const Expr *IncExpr,
3237  const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
3238  const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
3239 
3240  JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
3241  /// Emit initial code for loop counters of loop-based directives.
3242  void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
3243  OMPPrivateScope &LoopScope);
3244 
3245  /// Helper for the OpenMP loop directives.
3246  void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
3247 
3248  /// Emit code for the worksharing loop-based directive.
3249  /// \return true, if this construct has any lastprivate clause, false -
3250  /// otherwise.
3251  bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
3252  const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3253  const CodeGenDispatchBoundsTy &CGDispatchBounds);
3254 
3255  /// Emit code for the distribute loop-based directive.
3256  void EmitOMPDistributeLoop(const OMPLoopDirective &S,
3257  const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
3258 
3259  /// Helpers for the OpenMP loop directives.
3260  void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
3261  void EmitOMPSimdFinal(
3262  const OMPLoopDirective &D,
3263  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3264 
3265  /// Emits the lvalue for the expression with possibly captured variable.
3266  LValue EmitOMPSharedLValue(const Expr *E);
3267 
3268 private:
3269  /// Helpers for blocks.
3270  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
3271 
3272  /// struct with the values to be passed to the OpenMP loop-related functions
3273  struct OMPLoopArguments {
3274  /// loop lower bound
3275  Address LB = Address::invalid();
3276  /// loop upper bound
3277  Address UB = Address::invalid();
3278  /// loop stride
3279  Address ST = Address::invalid();
3280  /// isLastIteration argument for runtime functions
3281  Address IL = Address::invalid();
3282  /// Chunk value generated by sema
3283  llvm::Value *Chunk = nullptr;
3284  /// EnsureUpperBound
3285  Expr *EUB = nullptr;
3286  /// IncrementExpression
3287  Expr *IncExpr = nullptr;
3288  /// Loop initialization
3289  Expr *Init = nullptr;
3290  /// Loop exit condition
3291  Expr *Cond = nullptr;
3292  /// Update of LB after a whole chunk has been executed
3293  Expr *NextLB = nullptr;
3294  /// Update of UB after a whole chunk has been executed
3295  Expr *NextUB = nullptr;
3296  OMPLoopArguments() = default;
3297  OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
3298  llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
3299  Expr *IncExpr = nullptr, Expr *Init = nullptr,
3300  Expr *Cond = nullptr, Expr *NextLB = nullptr,
3301  Expr *NextUB = nullptr)
3302  : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
3303  IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
3304  NextUB(NextUB) {}
3305  };
3306  void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
3307  const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
3308  const OMPLoopArguments &LoopArgs,
3309  const CodeGenLoopTy &CodeGenLoop,
3310  const CodeGenOrderedTy &CodeGenOrdered);
3311  void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
3312  bool IsMonotonic, const OMPLoopDirective &S,
3313  OMPPrivateScope &LoopScope, bool Ordered,
3314  const OMPLoopArguments &LoopArgs,
3315  const CodeGenDispatchBoundsTy &CGDispatchBounds);
3316  void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
3317  const OMPLoopDirective &S,
3318  OMPPrivateScope &LoopScope,
3319  const OMPLoopArguments &LoopArgs,
3320  const CodeGenLoopTy &CodeGenLoopContent);
3321  /// Emit code for sections directive.
3322  void EmitSections(const OMPExecutableDirective &S);
3323 
3324 public:
3325 
3326  //===--------------------------------------------------------------------===//
3327  // LValue Expression Emission
3328  //===--------------------------------------------------------------------===//
3329 
3330  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
3331  RValue GetUndefRValue(QualType Ty);
3332 
3333  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
3334  /// and issue an ErrorUnsupported style diagnostic (using the
3335  /// provided Name).
3336  RValue EmitUnsupportedRValue(const Expr *E,
3337  const char *Name);
3338 
3339  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
3340  /// an ErrorUnsupported style diagnostic (using the provided Name).
3341  LValue EmitUnsupportedLValue(const Expr *E,
3342  const char *Name);
3343 
3344  /// EmitLValue - Emit code to compute a designator that specifies the location
3345  /// of the expression.
3346  ///
3347  /// This can return one of two things: a simple address or a bitfield
3348  /// reference. In either case, the LLVM Value* in the LValue structure is
3349  /// guaranteed to be an LLVM pointer type.
3350  ///
3351  /// If this returns a bitfield reference, nothing about the pointee type of
3352  /// the LLVM value is known: For example, it may not be a pointer to an
3353  /// integer.
3354  ///
3355  /// If this returns a normal address, and if the lvalue's C type is fixed
3356  /// size, this method guarantees that the returned pointer type will point to
3357  /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
3358  /// variable length type, this is not possible.
3359  ///
3360  LValue EmitLValue(const Expr *E);
3361 
3362  /// Same as EmitLValue but additionally we generate checking code to
3363  /// guard against undefined behavior. This is only suitable when we know
3364  /// that the address will be used to access the object.
3365  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
3366 
3367  RValue convertTempToRValue(Address addr, QualType type,
3368  SourceLocation Loc);
3369 
3370  void EmitAtomicInit(Expr *E, LValue lvalue);
3371 
3372  bool LValueIsSuitableForInlineAtomic(LValue Src);
3373 
3374  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
3375  AggValueSlot Slot = AggValueSlot::ignored());
3376 
3377  RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
3378  llvm::AtomicOrdering AO, bool IsVolatile = false,
3379  AggValueSlot slot = AggValueSlot::ignored());
3380 
3381  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
3382 
3383  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
3384  bool IsVolatile, bool isInit);
3385 
3386  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
3387  LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
3388  llvm::AtomicOrdering Success =
3389  llvm::AtomicOrdering::SequentiallyConsistent,
3390  llvm::AtomicOrdering Failure =
3391  llvm::AtomicOrdering::SequentiallyConsistent,
3392  bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
3393 
3394  void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
3395  const llvm::function_ref<RValue(RValue)> &UpdateOp,
3396  bool IsVolatile);
3397 
3398  /// EmitToMemory - Change a scalar value from its value
3399  /// representation to its in-memory representation.
3400  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
3401 
3402  /// EmitFromMemory - Change a scalar value from its memory
3403  /// representation to its value representation.
3404  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
3405 
3406  /// Check if the scalar \p Value is within the valid range for the given
3407  /// type \p Ty.
3408  ///
3409  /// Returns true if a check is needed (even if the range is unknown).
3410  bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
3411  SourceLocation Loc);
3412 
3413  /// EmitLoadOfScalar - Load a scalar value from an address, taking
3414  /// care to appropriately convert from the memory representation to
3415  /// the LLVM value representation.
3416  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
3417  SourceLocation Loc,
3419  bool isNontemporal = false) {
3420  return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
3421  CGM.getTBAAAccessInfo(Ty), isNontemporal);
3422  }
3423 
3424  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
3425  SourceLocation Loc, LValueBaseInfo BaseInfo,
3426  TBAAAccessInfo TBAAInfo,
3427  bool isNontemporal = false);
3428 
3429  /// EmitLoadOfScalar - Load a scalar value from an address, taking
3430  /// care to appropriately convert from the memory representation to
3431  /// the LLVM value representation. The l-value must be a simple
3432  /// l-value.
3433  llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);
3434 
3435  /// EmitStoreOfScalar - Store a scalar value to an address, taking
3436  /// care to appropriately convert from the memory representation to
3437  /// the LLVM value representation.
3439  bool Volatile, QualType Ty,
3441  bool isInit = false, bool isNontemporal = false) {
3442  EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
3443  CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
3444  }
3445 
3446  void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
3447  bool Volatile, QualType Ty,
3448  LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
3449  bool isInit = false, bool isNontemporal = false);
3450 
3451  /// EmitStoreOfScalar - Store a scalar value to an address, taking
3452  /// care to appropriately convert from the memory representation to
3453  /// the LLVM value representation. The l-value must be a simple
3454  /// l-value. The isInit flag indicates whether this is an initialization.
3455  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
3456  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
3457 
3458  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
3459  /// this method emits the address of the lvalue, then loads the result as an
3460  /// rvalue, returning the rvalue.
3461  RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
3462  RValue EmitLoadOfExtVectorElementLValue(LValue V);
3463  RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
3464  RValue EmitLoadOfGlobalRegLValue(LValue LV);
3465 
3466  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
3467  /// lvalue, where both are guaranteed to the have the same type, and that type
3468  /// is 'Ty'.
3469  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
3470  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
3471  void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
3472 
3473  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
3474  /// as EmitStoreThroughLValue.
3475  ///
3476  /// \param Result [out] - If non-null, this will be set to a Value* for the
3477  /// bit-field contents after the store, appropriate for use as the result of
3478  /// an assignment to the bit-field.
3479  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
3480  llvm::Value **Result=nullptr);
3481 
3482  /// Emit an l-value for an assignment (simple or compound) of complex type.
3483  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
3484  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
3485  LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
3486  llvm::Value *&Result);
3487 
3488  // Note: only available for agg return types
3489  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
3490  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
3491  // Note: only available for agg return types
3492  LValue EmitCallExprLValue(const CallExpr *E);
3493  // Note: only available for agg return types
3494  LValue EmitVAArgExprLValue(const VAArgExpr *E);
3495  LValue EmitDeclRefLValue(const DeclRefExpr *E);
3496  LValue EmitStringLiteralLValue(const StringLiteral *E);
3497  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
3498  LValue EmitPredefinedLValue(const PredefinedExpr *E);
3499  LValue EmitUnaryOpLValue(const UnaryOperator *E);
3500  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3501  bool Accessed = false);
3502  LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
3503  bool IsLowerBound = true);
3504  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
3505  LValue EmitMemberExpr(const MemberExpr *E);
3506  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
3507  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
3508  LValue EmitInitListLValue(const InitListExpr *E);
3509  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
3510  LValue EmitCastLValue(const CastExpr *E);
3511  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
3512  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
3513 
3514  Address EmitExtVectorElementLValue(LValue V);
3515 
3516  RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
3517 
3518  Address EmitArrayToPointerDecay(const Expr *Array,
3519  LValueBaseInfo *BaseInfo = nullptr,
3520  TBAAAccessInfo *TBAAInfo = nullptr);
3521 
3523  llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
3524  ConstantEmission(llvm::Constant *C, bool isReference)
3525  : ValueAndIsReference(C, isReference) {}
3526  public:
3528  static ConstantEmission forReference(llvm::Constant *C) {
3529  return ConstantEmission(C, true);
3530  }
3531  static ConstantEmission forValue(llvm::Constant *C) {
3532  return ConstantEmission(C, false);
3533  }
3534 
3535  explicit operator bool() const {
3536  return ValueAndIsReference.getOpaqueValue() != nullptr;
3537  }
3538 
3539  bool isReference() const { return ValueAndIsReference.getInt(); }
3540  LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
3541  assert(isReference());
3542  return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
3543  refExpr->getType());
3544  }
3545 
3546  llvm::Constant *getValue() const {
3547  assert(!isReference());
3548  return ValueAndIsReference.getPointer();
3549  }
3550  };
3551 
3552  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
3553  ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
3554  llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
3555 
3556  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
3557  AggValueSlot slot = AggValueSlot::ignored());
3558  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
3559 
3560  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
3561  const ObjCIvarDecl *Ivar);
3562  LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
3563  LValue EmitLValueForLambdaField(const FieldDecl *Field);
3564 
3565  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
3566  /// if the Field is a reference, this will return the address of the reference
3567  /// and not the address of the value stored in the reference.
3568  LValue EmitLValueForFieldInitialization(LValue Base,
3569  const FieldDecl* Field);
3570 
3571  LValue EmitLValueForIvar(QualType ObjectTy,
3572  llvm::Value* Base, const ObjCIvarDecl *Ivar,
3573  unsigned CVRQualifiers);
3574 
3575  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
3576  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
3577  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
3578  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
3579 
3580  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
3581  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
3582  LValue EmitStmtExprLValue(const StmtExpr *E);
3583  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
3584  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
3585  void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
3586 
3587  //===--------------------------------------------------------------------===//
3588  // Scalar Expression Emission
3589  //===--------------------------------------------------------------------===//
3590 
3591  /// EmitCall - Generate a call of the given function, expecting the given
3592  /// result type, and using the given argument list which specifies both the
3593  /// LLVM arguments and the types they were derived from.
3594  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
3595  ReturnValueSlot ReturnValue, const CallArgList &Args,
3596  llvm::CallBase **callOrInvoke, SourceLocation Loc);
3597  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
3598  ReturnValueSlot ReturnValue, const CallArgList &Args,
3599  llvm::CallBase **callOrInvoke = nullptr) {
3600  return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
3601  SourceLocation());
3602  }
3603  RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
3604  ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr);
3605  RValue EmitCallExpr(const CallExpr *E,
3606  ReturnValueSlot ReturnValue = ReturnValueSlot());
3607  RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
3608  CGCallee EmitCallee(const Expr *E);
3609 
3610  void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
3611 
3612  llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
3613  const Twine &name = "");
3614  llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
3616  const Twine &name = "");
3617  llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3618  const Twine &name = "");
3619  llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3621  const Twine &name = "");
3622 
3624  getBundlesForFunclet(llvm::Value *Callee);
3625 
3626  llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
3628  const Twine &Name = "");
3629  llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3631  const Twine &name = "");
3632  llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3633  const Twine &name = "");
3634  void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3636 
3638  NestedNameSpecifier *Qual,
3639  llvm::Type *Ty);
3640 
3641  CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
3642  CXXDtorType Type,
3643  const CXXRecordDecl *RD);
3644 
3645  // Return the copy constructor name with the prefix "__copy_constructor_"
3646  // removed.
3647  static std::string getNonTrivialCopyConstructorStr(QualType QT,
3648  CharUnits Alignment,
3649  bool IsVolatile,
3650  ASTContext &Ctx);
3651 
3652  // Return the destructor name with the prefix "__destructor_" removed.
3653  static std::string getNonTrivialDestructorStr(QualType QT,
3654  CharUnits Alignment,
3655  bool IsVolatile,
3656  ASTContext &Ctx);
3657 
3658  // These functions emit calls to the special functions of non-trivial C
3659  // structs.
3660  void defaultInitNonTrivialCStructVar(LValue Dst);
3661  void callCStructDefaultConstructor(LValue Dst);
3662  void callCStructDestructor(LValue Dst);
3663  void callCStructCopyConstructor(LValue Dst, LValue Src);
3664  void callCStructMoveConstructor(LValue Dst, LValue Src);
3665  void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
3666  void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
3667 
3668  RValue
3669  EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method,
3670  const CGCallee &Callee,
3671  ReturnValueSlot ReturnValue, llvm::Value *This,
3672  llvm::Value *ImplicitParam,
3673  QualType ImplicitParamTy, const CallExpr *E,
3674  CallArgList *RtlArgs);
3675  RValue EmitCXXDestructorCall(GlobalDecl Dtor,
3676  const CGCallee &Callee,
3677  llvm::Value *This, llvm::Value *ImplicitParam,
3678  QualType ImplicitParamTy, const CallExpr *E);
3679  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
3680  ReturnValueSlot ReturnValue);
3681  RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE,
3682  const CXXMethodDecl *MD,
3683  ReturnValueSlot ReturnValue,
3684  bool HasQualifier,
3685  NestedNameSpecifier *Qualifier,
3686  bool IsArrow, const Expr *Base);
3687  // Compute the object pointer.
3688  Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
3689  llvm::Value *memberPtr,
3690  const MemberPointerType *memberPtrType,
3691  LValueBaseInfo *BaseInfo = nullptr,
3692  TBAAAccessInfo *TBAAInfo = nullptr);
3693  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
3694  ReturnValueSlot ReturnValue);
3695 
3696  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
3697  const CXXMethodDecl *MD,
3698  ReturnValueSlot ReturnValue);
3699  RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);
3700 
3701  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
3702  ReturnValueSlot ReturnValue);
3703 
3704  RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
3705  ReturnValueSlot ReturnValue);
3706 
3707  RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
3708  const CallExpr *E, ReturnValueSlot ReturnValue);
3709 
3710  RValue emitRotate(const CallExpr *E, bool IsRotateRight);
3711 
3712  /// Emit IR for __builtin_os_log_format.
3713  RValue emitBuiltinOSLogFormat(const CallExpr &E);
3714 
3715  llvm::Function *generateBuiltinOSLogHelperFunction(
3716  const analyze_os_log::OSLogBufferLayout &Layout,
3717  CharUnits BufferAlignment);
3718 
3719  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
3720 
3721  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
3722  /// is unhandled by the current target.
3723  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3724 
3725  llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
3726  const llvm::CmpInst::Predicate Fp,
3727  const llvm::CmpInst::Predicate Ip,
3728  const llvm::Twine &Name = "");
3729  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
3730  llvm::Triple::ArchType Arch);
3731 
3732  llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
3733  unsigned LLVMIntrinsic,
3734  unsigned AltLLVMIntrinsic,
3735  const char *NameHint,
3736  unsigned Modifier,
3737  const CallExpr *E,
3739  Address PtrOp0, Address PtrOp1,
3740  llvm::Triple::ArchType Arch);
3741 
3742  llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
3743  unsigned Modifier, llvm::Type *ArgTy,
3744  const CallExpr *E);
3745  llvm::Value *EmitNeonCall(llvm::Function *F,
3747  const char *name,
3748  unsigned shift = 0, bool rightshift = false);
3749  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
3750  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
3751  bool negateForRightShift);
3752  llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
3753  llvm::Type *Ty, bool usgn, const char *name);
3754  llvm::Value *vectorWrapScalar16(llvm::Value *Op);
3755  llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
3756  llvm::Triple::ArchType Arch);
3757 
3758  llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
3759  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3760  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3761  llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3762  llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3763  llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3764  llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
3765  const CallExpr *E);
3766  llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3767 
3768 private:
3769  enum class MSVCIntrin;
3770 
3771 public:
3772  llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
3773 
3774  llvm::Value *EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args);
3775 
3776  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
3777  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
3778  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
3779  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
3780  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
3781  llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
3782  const ObjCMethodDecl *MethodWithObjects);
3783  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
3784  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
3785  ReturnValueSlot Return = ReturnValueSlot());
3786 
3787  /// Retrieves the default cleanup kind for an ARC cleanup.
3788  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
3790  return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
3792  }
3793 
3794  // ARC primitives.
3795  void EmitARCInitWeak(Address addr, llvm::Value *value);
3796  void EmitARCDestroyWeak(Address addr);
3797  llvm::Value *EmitARCLoadWeak(Address addr);
3798  llvm::Value *EmitARCLoadWeakRetained(Address addr);
3799  llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
3800  void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
3801  void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
3802  void EmitARCCopyWeak(Address dst, Address src);
3803  void EmitARCMoveWeak(Address dst, Address src);
3804  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
3805  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
3806  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
3807  bool resultIgnored);
3808  llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
3809  bool resultIgnored);
3810  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
3811  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
3812  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
3813  void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
3814  void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
3815  llvm::Value *EmitARCAutorelease(llvm::Value *value);
3816  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
3817  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
3818  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
3819  llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
3820 
3821  llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
3822  llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
3823  llvm::Type *returnType);
3824  void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
3825 
3826  std::pair<LValue,llvm::Value*>
3827  EmitARCStoreAutoreleasing(const BinaryOperator *e);
3828  std::pair<LValue,llvm::Value*>
3829  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
3830  std::pair<LValue,llvm::Value*>
3831  EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);
3832 
3833  llvm::Value *EmitObjCAlloc(llvm::Value *value,
3834  llvm::Type *returnType);
3835  llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
3836  llvm::Type *returnType);
3837  llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);
3838 
3839  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
3840  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
3841  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
3842 
3843  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
3844  llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
3845  bool allowUnsafeClaim);
3846  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
3847  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
3848  llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr);
3849 
3850  void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
3851 
3852  static Destroyer destroyARCStrongImprecise;
3853  static Destroyer destroyARCStrongPrecise;
3854  static Destroyer destroyARCWeak;
3855  static Destroyer emitARCIntrinsicUse;
3856  static Destroyer destroyNonTrivialCStruct;
3857 
3858  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
3859  llvm::Value *EmitObjCAutoreleasePoolPush();
3860  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
3861  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
3862  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
3863 
3864  /// Emits a reference binding to the passed in expression.
3865  RValue EmitReferenceBindingToExpr(const Expr *E);
3866 
3867  //===--------------------------------------------------------------------===//
3868  // Expression Emission
3869  //===--------------------------------------------------------------------===//
3870 
3871  // Expressions are broken into three classes: scalar, complex, aggregate.
3872 
3873  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
3874  /// scalar type, returning the result.
3875  llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
3876 
3877  /// Emit a conversion from the specified type to the specified destination
3878  /// type, both of which are LLVM scalar types.
3879  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
3880  QualType DstTy, SourceLocation Loc);
3881 
3882  /// Emit a conversion from the specified complex type to the specified
3883  /// destination type, where the destination type is an LLVM scalar type.
3884  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
3885  QualType DstTy,
3886  SourceLocation Loc);
3887 
3888  /// EmitAggExpr - Emit the computation of the specified expression
3889  /// of aggregate type. The result is computed into the given slot,
3890  /// which may be null to indicate that the value is not needed.
3891  void EmitAggExpr(const Expr *E, AggValueSlot AS);
3892 
3893  /// EmitAggExprToLValue - Emit the computation of the specified expression of
3894  /// aggregate type into a temporary LValue.
3895  LValue EmitAggExprToLValue(const Expr *E);
3896 
3897  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
3898  /// make sure it survives garbage collection until this point.
3899  void EmitExtendGCLifetime(llvm::Value *object);
3900 
3901  /// EmitComplexExpr - Emit the computation of the specified expression of
3902  /// complex type, returning the result.
3903  ComplexPairTy EmitComplexExpr(const Expr *E,
3904  bool IgnoreReal = false,
3905  bool IgnoreImag = false);
3906 
3907  /// EmitComplexExprIntoLValue - Emit the given expression of complex
3908  /// type and place its result into the specified l-value.
3909  void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
3910 
3911  /// EmitStoreOfComplex - Store a complex number into the specified l-value.
3912  void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
3913 
3914  /// EmitLoadOfComplex - Load a complex number from the specified l-value.
3915  ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
3916 
3917  Address emitAddrOfRealComponent(Address complex, QualType complexType);
3918  Address emitAddrOfImagComponent(Address complex, QualType complexType);
3919 
3920  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
3921  /// global variable that has already been created for it. If the initializer
3922  /// has a different type than GV does, this may free GV and return a different
3923  /// one. Otherwise it just returns GV.
3924  llvm::GlobalVariable *
3925  AddInitializerToStaticVarDecl(const VarDecl &D,
3926  llvm::GlobalVariable *GV);
3927 
3928  // Emit an @llvm.invariant.start call for the given memory region.
3929  void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
3930 
3931  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
3932  /// variable with global storage.
3933  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
3934  bool PerformInit);
3935 
3936  llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
3937  llvm::Constant *Addr);
3938 
3939  /// Call atexit() with a function that passes the given argument to
3940  /// the given function.
3941  void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
3942  llvm::Constant *addr);
3943 
3944  /// Call atexit() with function dtorStub.
3945  void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
3946 
3947  /// Emit code in this function to perform a guarded variable
3948  /// initialization. Guarded initializations are used when it's not
3949  /// possible to prove that an initialization will be done exactly
3950  /// once, e.g. with a static local variable or a static data member
3951  /// of a class template.
3952  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
3953  bool PerformInit);
3954 
3955  enum class GuardKind { VariableGuard, TlsGuard };
3956 
3957  /// Emit a branch to select whether or not to perform guarded initialization.
3958  void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
3959  llvm::BasicBlock *InitBlock,
3960  llvm::BasicBlock *NoInitBlock,
3961  GuardKind Kind, const VarDecl *D);
3962 
3963  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
3964  /// variables.
3965  void
3966  GenerateCXXGlobalInitFunc(llvm::Function *Fn,
3967  ArrayRef<llvm::Function *> CXXThreadLocals,
3968  ConstantAddress Guard = ConstantAddress::invalid());
3969 
3970  /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
3971  /// variables.
3972  void GenerateCXXGlobalDtorsFunc(
3973  llvm::Function *Fn,
3974  const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
3975  llvm::Constant *>> &DtorsAndObjects);
3976 
3977  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
3978  const VarDecl *D,
3979  llvm::GlobalVariable *Addr,
3980  bool PerformInit);
3981 
3982  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
3983 
3984  void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
3985 
3987  if (const auto *EWC = dyn_cast<ExprWithCleanups>(E))
3988  if (EWC->getNumObjects() == 0)
3989  return;
3990  enterNonTrivialFullExpression(E);
3991  }
3992  void enterNonTrivialFullExpression(const FullExpr *E);
3993 
3994  void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
3995 
3996  RValue EmitAtomicExpr(AtomicExpr *E);
3997 
3998  //===--------------------------------------------------------------------===//
3999  // Annotations Emission
4000  //===--------------------------------------------------------------------===//
4001 
4002  /// Emit an annotation call (intrinsic).
4003  llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
4004  llvm::Value *AnnotatedVal,
4005  StringRef AnnotationStr,
4006  SourceLocation Location);
4007 
4008  /// Emit local annotations for the local variable V, declared by D.
4009  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
4010 
4011  /// Emit field annotations for the given field & value. Returns the
4012  /// annotation result.
4013  Address EmitFieldAnnotations(const FieldDecl *D, Address V);
4014 
4015  //===--------------------------------------------------------------------===//
4016  // Internal Helpers
4017  //===--------------------------------------------------------------------===//
4018 
4019  /// ContainsLabel - Return true if the statement contains a label in it. If
4020  /// this statement is not executed normally, it not containing a label means
4021  /// that we can just remove the code.
4022  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
4023 
4024  /// containsBreak - Return true if the statement contains a break out of it.
4025  /// If the statement (recursively) contains a switch or loop with a break
4026  /// inside of it, this is fine.
4027  static bool containsBreak(const Stmt *S);
4028 
4029  /// Determine if the given statement might introduce a declaration into the
4030  /// current scope, by being a (possibly-labelled) DeclStmt.
4031  static bool mightAddDeclToScope(const Stmt *S);
4032 
4033  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
4034  /// to a constant, or if it does but contains a label, return false. If it
4035  /// constant folds return true and set the boolean result in Result.
4036  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result,
4037  bool AllowLabels = false);
4038 
4039  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
4040  /// to a constant, or if it does but contains a label, return false. If it
4041  /// constant folds return true and set the folded value.
4042  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
4043  bool AllowLabels = false);
4044 
4045  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
4046  /// if statement) to the specified blocks. Based on the condition, this might
4047  /// try to simplify the codegen of the conditional based on the branch.
4048  /// TrueCount should be the number of times we expect the condition to
4049  /// evaluate to true based on PGO data.
4050  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
4051  llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
4052 
4053  /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
4054  /// nonnull, if \p LHS is marked _Nonnull.
4055  void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
4056 
4057  /// An enumeration which makes it easier to specify whether or not an
4058  /// operation is a subtraction.
4059  enum { NotSubtraction = false, IsSubtraction = true };
4060 
4061  /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
4062  /// detect undefined behavior when the pointer overflow sanitizer is enabled.
4063  /// \p SignedIndices indicates whether any of the GEP indices are signed.
4064  /// \p IsSubtraction indicates whether the expression used to form the GEP
4065  /// is a subtraction.
4066  llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
4067  ArrayRef<llvm::Value *> IdxList,
4068  bool SignedIndices,
4069  bool IsSubtraction,
4070  SourceLocation Loc,
4071  const Twine &Name = "");
4072 
4073  /// Specifies which type of sanitizer check to apply when handling a
4074  /// particular builtin.
4078  };
4079 
4080  /// Emits an argument for a call to a builtin. If the builtin sanitizer is
4081  /// enabled, a runtime check specified by \p Kind is also emitted.
4082  llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
4083 
4084  /// Emit a description of a type in a format suitable for passing to
4085  /// a runtime sanitizer handler.
4086  llvm::Constant *EmitCheckTypeDescriptor(QualType T);
4087 
4088  /// Convert a value into a format suitable for passing to a runtime
4089  /// sanitizer handler.
4090  llvm::Value *EmitCheckValue(llvm::Value *V);
4091 
4092  /// Emit a description of a source location in a format suitable for
4093  /// passing to a runtime sanitizer handler.
4094  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
4095 
4096  /// Create a basic block that will either trap or call a handler function in
4097  /// the UBSan runtime with the provided arguments, and create a conditional
4098  /// branch to it.
4099  void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
4100  SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
4101  ArrayRef<llvm::Value *> DynamicArgs);
4102 
4103  /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
4104  /// if Cond if false.
4105  void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond,
4106  llvm::ConstantInt *TypeId, llvm::Value *Ptr,
4107  ArrayRef<llvm::Constant *> StaticArgs);
4108 
4109  /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
4110  /// checking is enabled. Otherwise, just emit an unreachable instruction.
4111  void EmitUnreachable(SourceLocation Loc);
4112 
4113  /// Create a basic block that will call the trap intrinsic, and emit a
4114  /// conditional branch to it, for the -ftrapv checks.
4115  void EmitTrapCheck(llvm::Value *Checked);
4116 
4117  /// Emit a call to trap or debugtrap and attach function attribute
4118  /// "trap-func-name" if specified.
4119  llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
4120 
4121  /// Emit a stub for the cross-DSO CFI check function.
4122  void EmitCfiCheckStub();
4123 
4124  /// Emit a cross-DSO CFI failure handling function.
4125  void EmitCfiCheckFail();
4126 
4127  /// Create a check for a function parameter that may potentially be
4128  /// declared as non-null.
4129  void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
4130  AbstractCallee AC, unsigned ParmNum);
4131 
4132  /// EmitCallArg - Emit a single call argument.
4133  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
4134 
4135  /// EmitDelegateCallArg - We are performing a delegate call; that
4136  /// is, the current function is delegating to another one. Produce
4137  /// a r-value suitable for passing the given parameter.
4138  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
4139  SourceLocation loc);
4140 
4141  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
4142  /// point operation, expressed as the maximum relative error in ulp.
4143  void SetFPAccuracy(llvm::Value *Val, float Accuracy);
4144 
4145 private:
4146  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
4147  void EmitReturnOfRValue(RValue RV, QualType Ty);
4148 
4149  void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
4150 
4152  DeferredReplacements;
4153 
4154  /// Set the address of a local variable.
4155  void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
4156  assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
4157  LocalDeclMap.insert({VD, Addr});
4158  }
4159 
4160  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
4161  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
4162  ///
4163  /// \param AI - The first function argument of the expansion.
4164  void ExpandTypeFromArgs(QualType Ty, LValue Dst,
4166 
4167  /// ExpandTypeToArgs - Expand an CallArg \arg Arg, with the LLVM type for \arg
4168  /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
4169  /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
4170  void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
4171  SmallVectorImpl<llvm::Value *> &IRCallArgs,
4172  unsigned &IRCallArgPos);
4173 
4174  llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
4175  const Expr *InputExpr, std::string &ConstraintStr);
4176 
4177  llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
4178  LValue InputValue, QualType InputType,
4179  std::string &ConstraintStr,
4180  SourceLocation Loc);
4181 
4182  /// Attempts to statically evaluate the object size of E. If that
4183  /// fails, emits code to figure the size of E out for us. This is
4184  /// pass_object_size aware.
4185  ///
4186  /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
4187  llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
4188  llvm::IntegerType *ResType,
4189  llvm::Value *EmittedE,
4190  bool IsDynamic);
4191 
4192  /// Emits the size of E, as required by __builtin_object_size. This
4193  /// function is aware of pass_object_size parameters, and will act accordingly
4194  /// if E is a parameter with the pass_object_size attribute.
4195  llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
4196  llvm::IntegerType *ResType,
4197  llvm::Value *EmittedE,
4198  bool IsDynamic);
4199 
4200 public:
4201 #ifndef NDEBUG
4202  // Determine whether the given argument is an Objective-C method
4203  // that may have type parameters in its signature.
4204  static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4205  const DeclContext *dc = method->getDeclContext();
4206  if (const ObjCInterfaceDecl *classDecl= dyn_cast<ObjCInterfaceDecl>(dc)) {
4207  return classDecl->getTypeParamListAsWritten();
4208  }
4209 
4210  if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4211  return catDecl->getTypeParamList();
4212  }
4213 
4214  return false;
4215  }
4216 
4217  template<typename T>
4218  static bool isObjCMethodWithTypeParams(const T *) { return false; }
4219 #endif
4220 
4221  enum class EvaluationOrder {
4222  ///! No language constraints on evaluation order.
4223  Default,
4224  ///! Language semantics require left-to-right evaluation.
4225  ForceLeftToRight,
4226  ///! Language semantics require right-to-left evaluation.
4227  ForceRightToLeft
4228  };
4229 
4230  /// EmitCallArgs - Emit call arguments for a function.
4231  template <typename T>
4232  void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
4233  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4235  unsigned ParamsToSkip = 0,
4236  EvaluationOrder Order = EvaluationOrder::Default) {
4237  SmallVector<QualType, 16> ArgTypes;
4238  CallExpr::const_arg_iterator Arg = ArgRange.begin();
4239 
4240  assert((ParamsToSkip == 0 || CallArgTypeInfo) &&
4241  "Can't skip parameters if type info is not provided");
4242  if (CallArgTypeInfo) {
4243 #ifndef NDEBUG
4244  bool isGenericMethod = isObjCMethodWithTypeParams(CallArgTypeInfo);
4245 #endif
4246 
4247  // First, use the argument types that the type info knows about
4248  for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip,
4249  E = CallArgTypeInfo->param_type_end();
4250  I != E; ++I, ++Arg) {
4251  assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4252  assert((isGenericMethod ||
4253  ((*I)->isVariablyModifiedType() ||
4254  (*I).getNonReferenceType()->isObjCRetainableType() ||
4255  getContext()
4256  .getCanonicalType((*I).getNonReferenceType())
4257  .getTypePtr() ==
4258  getContext()
4259  .getCanonicalType((*Arg)->getType())
4260  .getTypePtr())) &&
4261  "type mismatch in call argument!");
4262  ArgTypes.push_back(*I);
4263  }
4264  }
4265 
4266  // Either we've emitted all the call args, or we have a call to variadic
4267  // function.
4268  assert((Arg == ArgRange.end() || !CallArgTypeInfo ||
4269  CallArgTypeInfo->isVariadic()) &&
4270  "Extra arguments in non-variadic function!");
4271 
4272  // If we still have any arguments, emit them using the type of the argument.
4273  for (auto *A : llvm::make_range(Arg, ArgRange.end()))
4274  ArgTypes.push_back(CallArgTypeInfo ? getVarArgType(A) : A->getType());
4275 
4276  EmitCallArgs(Args, ArgTypes, ArgRange, AC, ParamsToSkip, Order);
4277  }
4278 
4279  void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
4280  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4282  unsigned ParamsToSkip = 0,
4283  EvaluationOrder Order = EvaluationOrder::Default);
4284 
4285  /// EmitPointerWithAlignment - Given an expression with a pointer type,
4286  /// emit the value and compute our best estimate of the alignment of the
4287  /// pointee.
4288  ///
4289  /// \param BaseInfo - If non-null, this will be initialized with
4290  /// information about the source of the alignment and the may-alias
4291  /// attribute. Note that this function will conservatively fall back on
4292  /// the type when it doesn't recognize the expression and may-alias will
4293  /// be set to false.
4294  ///
4295  /// One reasonable way to use this information is when there's a language
4296  /// guarantee that the pointer must be aligned to some stricter value, and
4297  /// we're simply trying to ensure that sufficiently obvious uses of under-
4298  /// aligned objects don't get miscompiled; for example, a placement new
4299  /// into the address of a local variable. In such a case, it's quite
4300  /// reasonable to just ignore the returned alignment when it isn't from an
4301  /// explicit source.
4302  Address EmitPointerWithAlignment(const Expr *Addr,
4303  LValueBaseInfo *BaseInfo = nullptr,
4304  TBAAAccessInfo *TBAAInfo = nullptr);
4305 
4306  /// If \p E references a parameter with pass_object_size info or a constant
4307  /// array size modifier, emit the object size divided by the size of \p EltTy.
4308  /// Otherwise return null.
4309  llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);
4310 
4311  void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
4312 
4314  llvm::Function *Function;
4316  struct Conds {
4317  StringRef Architecture;
4319 
4320  Conds(StringRef Arch, ArrayRef<StringRef> Feats)
4321  : Architecture(Arch), Features(Feats.begin(), Feats.end()) {}
4322  } Conditions;
4323 
4324  MultiVersionResolverOption(llvm::Function *F, StringRef Arch,
4325  ArrayRef<StringRef> Feats)
4326  : Function(F), Conditions(Arch, Feats) {}
4327  };
4328 
4329  // Emits the body of a multiversion function's resolver. Assumes that the
4330  // options are already sorted in the proper order, with the 'default' option
4331  // last (if it exists).
4332  void EmitMultiVersionResolver(llvm::Function *Resolver,
4334 
4335  static uint64_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
4336 
4337 private:
4338  QualType getVarArgType(const Expr *Arg);
4339 
4340  void EmitDeclMetadata();
4341 
4342  BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
4343  const AutoVarEmission &emission);
4344 
4345  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
4346 
4347  llvm::Value *GetValueForARMHint(unsigned BuiltinID);
4348  llvm::Value *EmitX86CpuIs(const CallExpr *E);
4349  llvm::Value *EmitX86CpuIs(StringRef CPUStr);
4350  llvm::Value *EmitX86CpuSupports(const CallExpr *E);
4351  llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
4352  llvm::Value *EmitX86CpuSupports(uint64_t Mask);
4353  llvm::Value *EmitX86CpuInit();
4354  llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
4355 };
4356 
4358 DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
4359  if (!needsSaving(value)) return saved_type(value, false);
4360 
4361  // Otherwise, we need an alloca.
4362  auto align = CharUnits::fromQuantity(
4363  CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
4364  Address alloca =
4365  CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
4366  CGF.Builder.CreateStore(value, alloca);
4367 
4368  return saved_type(alloca.getPointer(), true);
4369 }
4370 
4371 inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
4372  saved_type value) {
4373  // If the value says it wasn't saved, trust that it's still dominating.
4374  if (!value.getInt()) return value.getPointer();
4375 
4376  // Otherwise, it should be an alloca instruction, as set up in save().
4377  auto alloca = cast<llvm::AllocaInst>(value.getPointer());
4378  return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
4379 }
4380 
4381 } // end namespace CodeGen
4382 } // end namespace clang
4383 
4384 #endif
const llvm::DataLayout & getDataLayout() const
A call to an overloaded operator written using operator syntax.
Definition: ExprCXX.h:77
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Definition: CGCall.h:363
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
Optional< uint64_t > getStmtCount(const Stmt *S)
Check if an execution count is known for a given statement.
Definition: CodeGenPGO.h:62
This represents &#39;#pragma omp distribute simd&#39; composite directive.
Definition: StmtOpenMP.h:3271
Information about the layout of a __block variable.
Definition: CGBlocks.h:143
This represents &#39;#pragma omp master&#39; directive.
Definition: StmtOpenMP.h:1454
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
This represents &#39;#pragma omp task&#39; directive.
Definition: StmtOpenMP.h:1794
Represents a function declaration or definition.
Definition: Decl.h:1743
LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2549
Scheduling data for loop-based OpenMP directives.
Definition: OpenMPKinds.h:156
A (possibly-)qualified type.
Definition: Type.h:639
static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF, GlobalDecl GD, llvm::Type *Ty, const CXXRecordDecl *RD)
Definition: CGCXX.cpp:246
const CodeGenOptions & getCodeGenOpts() const
The class detects jumps which bypass local variables declaration: goto L; int a; L: ...
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition: CGExpr.cpp:138
void enterFullExpression(const FullExpr *E)
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:125
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
llvm::LLVMContext & getLLVMContext()
DominatorTree GraphTraits specialization so the DominatorTree can be iterable by generic graph iterat...
Definition: Dominators.h:29
CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
FieldConstructionScope(CodeGenFunction &CGF, Address This)
Represents a &#39;co_return&#39; statement in the C++ Coroutines TS.
Definition: StmtCXX.h:456
Stmt - This represents one statement.
Definition: Stmt.h:65
IfStmt - This represents an if/then/else.
Definition: Stmt.h:1764
static T * buildByrefHelpers(CodeGenModule &CGM, const BlockByrefInfo &byrefInfo, T &&generator)
Lazily build the copy and dispose helpers for a __block variable with the given information.
Definition: CGBlocks.cpp:2584
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
C Language Family Type Representation.
OpaqueValueMapping(CodeGenFunction &CGF, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it&#39;s the GNU ...
This represents &#39;#pragma omp for simd&#39; directive.
Definition: StmtOpenMP.h:1204
Checking the &#39;this&#39; pointer for a constructor call.
bool hasVolatileMember() const
Definition: Decl.h:3682
bool hasLabelBeenSeenInCurrentScope() const
Return true if a label was seen in the current scope.
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:88
This represents &#39;#pragma omp teams distribute parallel for&#39; composite directive.
Definition: StmtOpenMP.h:3682
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
static bool classof(const CGCapturedStmtInfo *)
Represents an attribute applied to a statement.
Definition: Stmt.h:1706
static Destroyer destroyARCStrongPrecise
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of &#39;this&#39;.
The base class of the type hierarchy.
Definition: Type.h:1414
This represents &#39;#pragma omp target teams distribute&#39; combined directive.
Definition: StmtOpenMP.h:3819
Represents Objective-C&#39;s @throw statement.
Definition: StmtObjC.h:332
CGCapturedStmtInfo(const CapturedStmt &S, CapturedRegionKind K=CR_Default)
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Definition: Stmt.h:3372
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:2824
void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D, llvm::Value *Address)
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1338
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:115
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:378
DominatingValue< T >::saved_type saveValueInCond(T value)
CGCapturedStmtInfo(CapturedRegionKind K=CR_Default)
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
const ParmVarDecl * getParamDecl(unsigned I) const
const llvm::function_ref< void(CodeGenFunction &, llvm::Function *, const OMPTaskDataTy &)> TaskGenTy
This represents &#39;#pragma omp parallel for&#39; directive.
Definition: StmtOpenMP.h:1575
void emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S, llvm::Value *StepV)
Definition: CodeGenPGO.cpp:892
This represents &#39;#pragma omp target teams distribute parallel for&#39; combined directive.
Definition: StmtOpenMP.h:3887
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2562
Represents a prvalue temporary that is written into memory so that a reference can bind to it...
Definition: ExprCXX.h:4332
static bool needsSaving(llvm::Value *value)
Answer whether the given value needs extra work to be saved.
static type restore(CodeGenFunction &CGF, saved_type value)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv)
Represents a point when we exit a loop.
Definition: ProgramPoint.h:713
const CXXBaseSpecifier *const * path_const_iterator
Definition: Expr.h:3110
This represents &#39;#pragma omp target exit data&#39; directive.
Definition: StmtOpenMP.h:2486
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
Represents a variable declaration or definition.
Definition: Decl.h:812
Address getObjectAddress(CodeGenFunction &CGF) const
Returns the address of the object within this declaration.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke=nullptr)
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC &#39;id&#39; type.
Definition: ExprObjC.h:1492
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:2968
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const T * getAs() const
Member-template getAs<specific type>&#39;.
Definition: Type.h:6803
uint64_t getProfileCount(const Stmt *S)
Get the profiler&#39;s count for the given statement.
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:54
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:138
void setCurrentProfileCount(uint64_t Count)
Set the profiler&#39;s current count.
llvm::Value * getPointer() const
Definition: Address.h:37
static ConstantEmission forValue(llvm::Constant *C)
capture_iterator capture_begin()
Retrieve an iterator pointing to the first capture.
Definition: Stmt.h:3397
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1056
Represents a parameter to a function.
Definition: Decl.h:1555
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have...
Definition: Linkage.h:23
Defines the clang::Expr interface and subclasses for C++ expressions.
The collection of all-type qualifiers we support.
Definition: Type.h:137
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters...
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:1662
Represents a struct/union/class.
Definition: Decl.h:3598
llvm::DenseMap< const VarDecl *, FieldDecl * > LambdaCaptureFields
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
const TargetInfo & getTarget() const
An object to manage conditionally-evaluated expressions.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we&#39;re intending to store to the side, but which will prob...
ConditionalCleanup stores the saved form of its parameters, then restores them and performs the clean...
Definition: EHScopeStack.h:197
ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
void setScopeDepth(EHScopeStack::stable_iterator depth)
This represents &#39;#pragma omp parallel&#39; directive.
Definition: StmtOpenMP.h:299
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:154
FullExpr - Represents a "full-expression" node.
Definition: Expr.h:910
llvm::SmallPtrSet< const CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
The scope used to remap some variables as private in the OpenMP loop body (or other captured region e...
SmallVector< Address, 1 > SEHCodeSlotStack
A stack of exception code slots.
Represents a member of a struct/union/class.
Definition: Decl.h:2584
Definition: Format.h:2222
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
bool isReferenceType() const
Definition: Type.h:6355
Helper class with most of the code for saving a value for a conditional expression cleanup...
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code...
This represents &#39;#pragma omp target simd&#39; directive.
Definition: StmtOpenMP.h:3407
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1036
Defines some OpenMP-specific enums and functions.
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:5393
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself...
A metaprogramming class for ensuring that a value will dominate an arbitrary position in a function...
Definition: EHScopeStack.h:65
This represents &#39;#pragma omp barrier&#39; directive.
Definition: StmtOpenMP.h:1906
CleanupKind getCleanupKind(QualType::DestructionKind kind)
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:49
ObjCArrayLiteral - used for objective-c array containers; as in: @["Hello", NSApp, [NSNumber numberWithInt:42]];.
Definition: ExprObjC.h:188
The this pointer adjustment as well as an optional return adjustment for a thunk. ...
Definition: ABI.h:178
This is a common base class for loop directives (&#39;omp simd&#39;, &#39;omp for&#39;, &#39;omp for simd&#39; etc...
Definition: StmtOpenMP.h:361
This represents &#39;#pragma omp critical&#39; directive.
Definition: StmtOpenMP.h:1501
const AstTypeMatcher< ComplexType > complexType
Matches C99 complex types.
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition: CGExpr.cpp:193
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
OpenMPDistScheduleClauseKind
OpenMP attributes for &#39;dist_schedule&#39; clause.
Definition: OpenMPKinds.h:124
bool isGLValue() const
Definition: Expr.h:254
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2446
Describes an C or C++ initializer list.
Definition: Expr.h:4284
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:680
This represents &#39;#pragma omp distribute parallel for&#39; composite directive.
Definition: StmtOpenMP.h:3122
void setCurrentRegionCount(uint64_t Count)
Set the counter value for the current region.
Definition: CodeGenPGO.h:58
A class controlling the emission of a finally block.
This represents &#39;#pragma omp teams distribute parallel for simd&#39; composite directive.
Definition: StmtOpenMP.h:3611
BinaryOperatorKind
static bool hasScalarEvaluationKind(QualType T)
ForStmt - This represents a &#39;for (init;cond;inc)&#39; stmt.
Definition: Stmt.h:2336
InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
MultiVersionResolverOption(llvm::Function *F, StringRef Arch, ArrayRef< StringRef > Feats)
ObjCContainerDecl - Represents a container for method declarations.
Definition: DeclObjC.h:968
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:37
llvm::function_ref< std::pair< LValue, LValue > CodeGenFunction &, const OMPExecutableDirective &S)> CodeGenLoopBoundsTy
CGCapturedStmtRAII(CodeGenFunction &CGF, CGCapturedStmtInfo *NewCapturedStmtInfo)
LexicalScope(CodeGenFunction &CGF, SourceRange Range)
Enter a new cleanup scope.
RAII for correct setting/restoring of CapturedStmtInfo.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type...
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:66
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
Represents a declaration of a type.
Definition: Decl.h:2879
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3318
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind...
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv)
void restore(CodeGenFunction &CGF)
Restores original addresses of the variables.
CXXForRangeStmt - This represents C++0x [stmt.ranged]&#39;s ranged for statement, represented as &#39;for (ra...
Definition: StmtCXX.h:134
#define LIST_SANITIZER_CHECKS
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
This represents &#39;#pragma omp cancellation point&#39; directive.
Definition: StmtOpenMP.h:2741
ObjCStringLiteral, used for Objective-C string literals i.e.
Definition: ExprObjC.h:50
Expr * getSizeExpr() const
Definition: Type.h:3003
field_iterator field_begin() const
Definition: Decl.cpp:4243
CaseStmt - Represent a case statement.
Definition: Stmt.h:1430
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Definition: EHScopeStack.h:99
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
This represents &#39;#pragma omp teams&#39; directive.
Definition: StmtOpenMP.h:2684
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:84
Enums/classes describing ABI related information about constructors, destructors and thunks...
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3041
This represents &#39;#pragma omp teams distribute simd&#39; combined directive.
Definition: StmtOpenMP.h:3541
void ForceCleanup(std::initializer_list< llvm::Value **> ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1289
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
Controls insertion of cancellation exit blocks in worksharing constructs.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler&#39;s counter for the given statement by StepV.
uint64_t getCurrentProfileCount()
Get the profiler&#39;s current count.
CallLifetimeEnd(Address addr, llvm::Value *size)
llvm::function_ref< std::pair< llvm::Value *, llvm::Value * > CodeGenFunction &, const OMPExecutableDirective &S, Address LB, Address UB)> CodeGenDispatchBoundsTy
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:105
Represents an ObjC class declaration.
Definition: DeclObjC.h:1171
Checking the operand of a cast to a virtual base object.
JumpDest getJumpDestInCurrentScope(StringRef Name=StringRef())
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1118
Checking the operand of a load. Must be suitably sized and aligned.
~LexicalScope()
Exit this cleanup scope, emitting any accumulated cleanups.
Checking the &#39;this&#39; pointer for a call to a non-static member function.
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
ObjCPropertyImplDecl - Represents implementation declaration of a property in a class or category imp...
Definition: DeclObjC.h:2758
This represents &#39;#pragma omp target parallel for simd&#39; directive.
Definition: StmtOpenMP.h:3339
OpenMP 4.0 [2.4, Array Sections].
Definition: ExprOpenMP.h:44
bool isValid() const
Definition: Address.h:35
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition: ExprCXX.h:2392
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1277
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3699
Describes the capture of either a variable, or &#39;this&#39;, or variable-length array type.
Definition: Stmt.h:3267
This represents &#39;#pragma omp taskgroup&#39; directive.
Definition: StmtOpenMP.h:1994
const TargetCodeGenInfo & getTargetCodeGenInfo()
CGBlockInfo - Information to generate a block literal.
Definition: CGBlocks.h:152
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
bool isGlobalVarCaptured(const VarDecl *VD) const
Checks if the global variable is captured in current function.
The class used to assign some variables some temporarily addresses.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:178
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4125
AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression.
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
This represents &#39;#pragma omp distribute&#39; directive.
Definition: StmtOpenMP.h:2995
Exposes information about the current target.
Definition: TargetInfo.h:161
CXXDtorType
C++ destructor types.
Definition: ABI.h:33
bool addPrivate(const VarDecl *LocalVD, const llvm::function_ref< Address()> PrivateGen)
Registers LocalVD variable as a private and apply PrivateGen function for it to generate correspondin...
EHScopeStack::stable_iterator getScopeDepth() const
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:636
This represents one expression.
Definition: Expr.h:108
Address getOriginalAllocatedAddress() const
Returns the address for the original alloca instruction.
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups...
Definition: EHScopeStack.h:355
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited...
llvm::function_ref< void(CodeGenFunction &, SourceLocation, const unsigned, const bool)> CodeGenOrderedTy
static ParamValue forIndirect(Address addr)
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, RValue rvalue)
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)