clang  9.0.0svn
CodeGenFunction.h
1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the internal per-function state used for llvm translation.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14 #define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15 
16 #include "CGBuilder.h"
17 #include "CGDebugInfo.h"
18 #include "CGLoopInfo.h"
19 #include "CGValue.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "EHScopeStack.h"
23 #include "VarBypassDetector.h"
24 #include "clang/AST/CharUnits.h"
25 #include "clang/AST/ExprCXX.h"
26 #include "clang/AST/ExprObjC.h"
27 #include "clang/AST/ExprOpenMP.h"
28 #include "clang/AST/Type.h"
29 #include "clang/Basic/ABI.h"
30 #include "clang/Basic/CapturedStmt.h"
31 #include "clang/Basic/CodeGenOptions.h"
32 #include "clang/Basic/OpenMPKinds.h"
33 #include "clang/Basic/TargetInfo.h"
34 #include "llvm/ADT/ArrayRef.h"
35 #include "llvm/ADT/DenseMap.h"
36 #include "llvm/ADT/MapVector.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/IR/ValueHandle.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Transforms/Utils/SanitizerStats.h"
41 
42 namespace llvm {
43 class BasicBlock;
44 class LLVMContext;
45 class MDNode;
46 class Module;
47 class SwitchInst;
48 class Twine;
49 class Value;
50 class CallSite;
51 }
52 
53 namespace clang {
54 class ASTContext;
55 class BlockDecl;
56 class CXXDestructorDecl;
57 class CXXForRangeStmt;
58 class CXXTryStmt;
59 class Decl;
60 class LabelDecl;
61 class EnumConstantDecl;
62 class FunctionDecl;
63 class FunctionProtoType;
64 class LabelStmt;
65 class ObjCContainerDecl;
66 class ObjCInterfaceDecl;
67 class ObjCIvarDecl;
68 class ObjCMethodDecl;
69 class ObjCImplementationDecl;
70 class ObjCPropertyImplDecl;
71 class TargetInfo;
72 class VarDecl;
73 class ObjCForCollectionStmt;
74 class ObjCAtTryStmt;
75 class ObjCAtThrowStmt;
76 class ObjCAtSynchronizedStmt;
77 class ObjCAutoreleasePoolStmt;
78 
79 namespace analyze_os_log {
80 class OSLogBufferLayout;
81 }
82 
83 namespace CodeGen {
84 class CodeGenTypes;
85 class CGCallee;
86 class CGFunctionInfo;
87 class CGRecordLayout;
88 class CGBlockInfo;
89 class CGCXXABI;
90 class BlockByrefHelpers;
91 class BlockByrefInfo;
92 class BlockFlags;
93 class BlockFieldFlags;
94 class RegionCodeGenTy;
95 class TargetCodeGenInfo;
96 struct OMPTaskDataTy;
97 struct CGCoroData;
98 
99 /// The kind of evaluation to perform on values of a particular
100 /// type. Basically, is the code in CGExprScalar, CGExprComplex, or
101 /// CGExprAgg?
102 ///
103 /// TODO: should vectors maybe be split out into their own thing?
104 enum TypeEvaluationKind {
105  TEK_Scalar,
106  TEK_Complex,
107  TEK_Aggregate
108 };
109 
110 #define LIST_SANITIZER_CHECKS \
111  SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
112  SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
113  SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
114  SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
115  SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
116  SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
117  SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
118  SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
119  SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
120  SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
121  SANITIZER_CHECK(MissingReturn, missing_return, 0) \
122  SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
123  SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
124  SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
125  SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
126  SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
127  SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
128  SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
129  SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
130  SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
131  SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
132  SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
133  SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
134  SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
135 
136 enum SanitizerHandler {
137 #define SANITIZER_CHECK(Enum, Name, Version) Enum,
138  LIST_SANITIZER_CHECKS
139 #undef SANITIZER_CHECK
140 };
141 
142 /// Helper class with most of the code for saving a value for a
143 /// conditional expression cleanup.
144 struct DominatingLLVMValue {
145  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
146 
147  /// Answer whether the given value needs extra work to be saved.
148  static bool needsSaving(llvm::Value *value) {
149  // If it's not an instruction, we don't need to save.
150  if (!isa<llvm::Instruction>(value)) return false;
151 
152  // If it's an instruction in the entry block, we don't need to save.
153  llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
154  return (block != &block->getParent()->getEntryBlock());
155  }
156 
157  static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
158  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
159 };
160 
161 /// A partial specialization of DominatingValue for llvm::Values that
162 /// might be llvm::Instructions.
163 template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
164  typedef T *type;
165  static type restore(CodeGenFunction &CGF, saved_type value) {
166  return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
167  }
168 };
169 
170 /// A specialization of DominatingValue for Address.
171 template <> struct DominatingValue<Address> {
172  typedef Address type;
173 
174  struct saved_type {
175  DominatingLLVMValue::saved_type SavedValue;
176  CharUnits Alignment;
177  };
178 
179  static bool needsSaving(type value) {
180  return DominatingLLVMValue::needsSaving(value.getPointer());
181  }
182  static saved_type save(CodeGenFunction &CGF, type value) {
183  return { DominatingLLVMValue::save(CGF, value.getPointer()),
184  value.getAlignment() };
185  }
186  static type restore(CodeGenFunction &CGF, saved_type value) {
187  return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
188  value.Alignment);
189  }
190 };
191 
192 /// A specialization of DominatingValue for RValue.
193 template <> struct DominatingValue<RValue> {
194  typedef RValue type;
195  class saved_type {
196  enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
197  AggregateAddress, ComplexAddress };
198 
199  llvm::Value *Value;
200  unsigned K : 3;
201  unsigned Align : 29;
202  saved_type(llvm::Value *v, Kind k, unsigned a = 0)
203  : Value(v), K(k), Align(a) {}
204 
205  public:
206  static bool needsSaving(RValue value);
207  static saved_type save(CodeGenFunction &CGF, RValue value);
208  RValue restore(CodeGenFunction &CGF);
209 
210  // implementations in CGCleanup.cpp
211  };
212 
213  static bool needsSaving(type value) {
214  return saved_type::needsSaving(value);
215  }
216  static saved_type save(CodeGenFunction &CGF, type value) {
217  return saved_type::save(CGF, value);
218  }
219  static type restore(CodeGenFunction &CGF, saved_type value) {
220  return value.restore(CGF);
221  }
222 };
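 // Illustrative sketch (not part of this header): saveValueInCond below uses
 // these specializations to spill values computed inside a conditional branch
 // so that a cleanup can reload them later:
 //
 //   DominatingValue<RValue>::saved_type Saved =
 //       DominatingValue<RValue>::save(CGF, RV);   // RV: a hypothetical RValue
 //   ...
 //   RValue Reloaded = DominatingValue<RValue>::restore(CGF, Saved);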
223 
224 /// CodeGenFunction - This class organizes the per-function state that is used
225 /// while generating LLVM code.
226 class CodeGenFunction : public CodeGenTypeCache {
227  CodeGenFunction(const CodeGenFunction &) = delete;
228  void operator=(const CodeGenFunction &) = delete;
229 
230  friend class CGCXXABI;
231 public:
232  /// A jump destination is an abstract label, branching to which may
233  /// require a jump out through normal cleanups.
234  struct JumpDest {
235  JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
236  JumpDest(llvm::BasicBlock *Block,
237  EHScopeStack::stable_iterator Depth,
238  unsigned Index)
239  : Block(Block), ScopeDepth(Depth), Index(Index) {}
240 
241  bool isValid() const { return Block != nullptr; }
242  llvm::BasicBlock *getBlock() const { return Block; }
243  EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
244  unsigned getDestIndex() const { return Index; }
245 
246  // This should be used cautiously.
247  void setScopeDepth(EHScopeStack::stable_iterator depth) {
248  ScopeDepth = depth;
249  }
250 
251  private:
252  llvm::BasicBlock *Block;
253  EHScopeStack::stable_iterator ScopeDepth;
254  unsigned Index;
255  };
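 // Illustrative sketch (not part of this header): a JumpDest is normally
 // obtained from getJumpDestInCurrentScope() and reached through any
 // intervening cleanups:
 //
 //   CodeGenFunction::JumpDest Dest =
 //       CGF.getJumpDestInCurrentScope("some.label");  // label name arbitrary
 //   ...
 //   CGF.EmitBranchThroughCleanup(Dest);  // emits cleanups, then the branch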
256 
257  CodeGenModule &CGM; // Per-module state.
258  const TargetInfo &Target;
259 
260  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
261  LoopInfoStack LoopStack;
262  CGBuilderTy Builder;
263 
264  // Stores variables for which we can't generate correct lifetime markers
265  // because of jumps.
266  VarBypassDetector Bypasses;
267 
268  // CodeGen lambda for loops and support for ordered clause
269  typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
270  JumpDest)>
271  CodeGenLoopTy;
272  typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
273  const unsigned, const bool)>
274  CodeGenOrderedTy;
275 
276  // Codegen lambda for loop bounds in worksharing loop constructs
277  typedef llvm::function_ref<std::pair<LValue, LValue>(
278  CodeGenFunction &, const OMPExecutableDirective &S)>
279  CodeGenLoopBoundsTy;
280 
281  // Codegen lambda for loop bounds in dispatch-based loop implementation
282  typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
283  CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
284  Address UB)>
285  CodeGenDispatchBoundsTy;
286 
287  /// CGBuilder insert helper. This function is called after an
288  /// instruction is created using Builder.
289  void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
290  llvm::BasicBlock *BB,
291  llvm::BasicBlock::iterator InsertPt) const;
292 
293  /// CurFuncDecl - Holds the Decl for the current outermost
294  /// non-closure context.
295  const Decl *CurFuncDecl;
296  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
297  const Decl *CurCodeDecl;
298  const CGFunctionInfo *CurFnInfo;
299  QualType FnRetTy;
300  llvm::Function *CurFn = nullptr;
301 
302  // Holds coroutine data if the current function is a coroutine. We use a
303  // wrapper to manage its lifetime, so that we don't have to define CGCoroData
304  // in this header.
305  struct CGCoroInfo {
306  std::unique_ptr<CGCoroData> Data;
307  CGCoroInfo();
308  ~CGCoroInfo();
309  };
310  CGCoroInfo CurCoro;
311 
312  bool isCoroutine() const {
313  return CurCoro.Data != nullptr;
314  }
315 
316  /// CurGD - The GlobalDecl for the current function being compiled.
317  GlobalDecl CurGD;
318 
319  /// PrologueCleanupDepth - The cleanup depth enclosing all the
320  /// cleanups associated with the parameters.
321  EHScopeStack::stable_iterator PrologueCleanupDepth;
322 
323  /// ReturnBlock - Unified return block.
324  JumpDest ReturnBlock;
325 
326  /// ReturnValue - The temporary alloca to hold the return
327  /// value. This is invalid iff the function has no return value.
328  Address ReturnValue = Address::invalid();
329 
330  /// Return true if a label was seen in the current scope.
331  bool hasLabelBeenSeenInCurrentScope() const {
332  if (CurLexicalScope)
333  return CurLexicalScope->hasLabels();
334  return !LabelMap.empty();
335  }
336 
337  /// AllocaInsertPoint - This is an instruction in the entry block before which
338  /// we prefer to insert allocas.
339  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
340 
341  /// API for captured statement code generation.
342  class CGCapturedStmtInfo {
343  public:
344  explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
345  : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
346  explicit CGCapturedStmtInfo(const CapturedStmt &S,
347  CapturedRegionKind K = CR_Default)
348  : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
349 
350  RecordDecl::field_iterator Field =
351  S.getCapturedRecordDecl()->field_begin();
352  for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
353  E = S.capture_end();
354  I != E; ++I, ++Field) {
355  if (I->capturesThis())
356  CXXThisFieldDecl = *Field;
357  else if (I->capturesVariable())
358  CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
359  else if (I->capturesVariableByCopy())
360  CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
361  }
362  }
363 
364  virtual ~CGCapturedStmtInfo();
365 
366  CapturedRegionKind getKind() const { return Kind; }
367 
368  virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
369  // Retrieve the value of the context parameter.
370  virtual llvm::Value *getContextValue() const { return ThisValue; }
371 
372  /// Lookup the captured field decl for a variable.
373  virtual const FieldDecl *lookup(const VarDecl *VD) const {
374  return CaptureFields.lookup(VD->getCanonicalDecl());
375  }
376 
377  bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
378  virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
379 
380  static bool classof(const CGCapturedStmtInfo *) {
381  return true;
382  }
383 
384  /// Emit the captured statement body.
385  virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
386  CGF.incrementProfileCounter(S);
387  CGF.EmitStmt(S);
388  }
389 
390  /// Get the name of the capture helper.
391  virtual StringRef getHelperName() const { return "__captured_stmt"; }
392 
393  private:
394  /// The kind of captured statement being generated.
395  CapturedRegionKind Kind;
396 
397  /// Keep the map between VarDecl and FieldDecl.
398  llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
399 
400  /// The base address of the captured record, passed in as the first
401  /// argument of the parallel region function.
402  llvm::Value *ThisValue;
403 
404  /// Captured 'this' type.
405  FieldDecl *CXXThisFieldDecl;
406  };
407  CGCapturedStmtInfo *CapturedStmtInfo = nullptr;
408 
409  /// RAII for correct setting/restoring of CapturedStmtInfo.
410  class CGCapturedStmtRAII {
411  private:
412  CodeGenFunction &CGF;
413  CGCapturedStmtInfo *PrevCapturedStmtInfo;
414  public:
415  CGCapturedStmtRAII(CodeGenFunction &CGF,
416  CGCapturedStmtInfo *NewCapturedStmtInfo)
417  : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
418  CGF.CapturedStmtInfo = NewCapturedStmtInfo;
419  }
420  ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
421  };
422 
423  /// An abstract representation of regular/ObjC call/message targets.
424  class AbstractCallee {
425  /// The function declaration of the callee.
426  const Decl *CalleeDecl;
427 
428  public:
429  AbstractCallee() : CalleeDecl(nullptr) {}
430  AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
431  AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
432  bool hasFunctionDecl() const {
433  return dyn_cast_or_null<FunctionDecl>(CalleeDecl);
434  }
435  const Decl *getDecl() const { return CalleeDecl; }
436  unsigned getNumParams() const {
437  if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
438  return FD->getNumParams();
439  return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
440  }
441  const ParmVarDecl *getParamDecl(unsigned I) const {
442  if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
443  return FD->getParamDecl(I);
444  return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
445  }
446  };
447 
448  /// Sanitizers enabled for this function.
449  SanitizerSet SanOpts;
450 
451  /// True if CodeGen currently emits code implementing sanitizer checks.
452  bool IsSanitizerScope = false;
453 
454  /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
455  class SanitizerScope {
456  CodeGenFunction *CGF;
457  public:
458  SanitizerScope(CodeGenFunction *CGF);
459  ~SanitizerScope();
460  };
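 // Illustrative sketch (not part of this header): IsSanitizerScope is set for
 // the duration of check emission via the RAII object:
 //
 //   {
 //     CodeGenFunction::SanitizerScope SanScope(&CGF);
 //     // ... emit the sanitizer check code here ...
 //   }   // IsSanitizerScope is reset when SanScope is destroyed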
461 
462  /// In C++, whether we are code generating a thunk. This controls whether we
463  /// should emit cleanups.
464  bool CurFuncIsThunk = false;
465 
466  /// In ARC, whether we should autorelease the return value.
467  bool AutoreleaseResult = false;
468 
469  /// Whether we processed a Microsoft-style asm block during CodeGen. These can
470  /// potentially set the return value.
471  bool SawAsmBlock = false;
472 
473  const NamedDecl *CurSEHParent = nullptr;
474 
475  /// True if the current function is an outlined SEH helper. This can be a
476  /// finally block or filter expression.
477  bool IsOutlinedSEHHelper = false;
478 
479  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
480  llvm::Value *BlockPointer = nullptr;
481 
482  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
483  FieldDecl *LambdaThisCaptureField = nullptr;
484 
485  /// A mapping from NRVO variables to the flags used to indicate
486  /// when the NRVO has been applied to this variable.
487  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
488 
489  EHScopeStack EHStack;
490  llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
491  llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
492 
493  llvm::Instruction *CurrentFuncletPad = nullptr;
494 
495  class CallLifetimeEnd final : public EHScopeStack::Cleanup {
496  llvm::Value *Addr;
497  llvm::Value *Size;
498 
499  public:
500  CallLifetimeEnd(Address addr, llvm::Value *size)
501  : Addr(addr.getPointer()), Size(size) {}
502 
503  void Emit(CodeGenFunction &CGF, Flags flags) override {
504  CGF.EmitLifetimeEnd(Size, Addr);
505  }
506  };
507 
508  /// Header for data within LifetimeExtendedCleanupStack.
509  struct LifetimeExtendedCleanupHeader {
510  /// The size of the following cleanup object.
511  unsigned Size;
512  /// The kind of cleanup to push: a value from the CleanupKind enumeration.
513  unsigned Kind : 31;
514  /// Whether this is a conditional cleanup.
515  unsigned IsConditional : 1;
516 
517  size_t getSize() const { return Size; }
518  CleanupKind getKind() const { return (CleanupKind)Kind; }
519  bool isConditional() const { return IsConditional; }
520  };
521 
522  /// i32s containing the indexes of the cleanup destinations.
523  Address NormalCleanupDest = Address::invalid();
524 
525  unsigned NextCleanupDestIndex = 1;
526 
527  /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
528  CGBlockInfo *FirstBlockInfo = nullptr;
529 
530  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
531  llvm::BasicBlock *EHResumeBlock = nullptr;
532 
533  /// The exception slot. All landing pads write the current exception pointer
534  /// into this alloca.
535  llvm::Value *ExceptionSlot = nullptr;
536 
537  /// The selector slot. Under the MandatoryCleanup model, all landing pads
538  /// write the current selector value into this alloca.
539  llvm::AllocaInst *EHSelectorSlot = nullptr;
540 
541  /// A stack of exception code slots. Entering an __except block pushes a slot
542  /// on the stack and leaving pops one. The __exception_code() intrinsic loads
543  /// a value from the top of the stack.
544  SmallVector<llvm::Value *, 1> SEHCodeSlotStack;
545 
546  /// Value returned by __exception_info intrinsic.
547  llvm::Value *SEHInfo = nullptr;
548 
549  /// Emits a landing pad for the current EH stack.
550  llvm::BasicBlock *EmitLandingPad();
551 
552  llvm::BasicBlock *getInvokeDestImpl();
553 
554  template <class T>
555  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
556  return DominatingValue<T>::save(*this, value);
557  }
558 
559 public:
560  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
561  /// rethrows.
562  SmallVector<llvm::Value*, 8> ObjCEHValueStack;
563 
564  /// A class controlling the emission of a finally block.
565  class FinallyInfo {
566  /// Where the catchall's edge through the cleanup should go.
567  JumpDest RethrowDest;
568 
569  /// A function to call to enter the catch.
570  llvm::Constant *BeginCatchFn;
571 
572  /// An i1 variable indicating whether or not the @finally is
573  /// running for an exception.
574  llvm::AllocaInst *ForEHVar;
575 
576  /// An i8* variable into which the exception pointer to rethrow
577  /// has been saved.
578  llvm::AllocaInst *SavedExnVar;
579 
580  public:
581  void enter(CodeGenFunction &CGF, const Stmt *Finally,
582  llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
583  llvm::Constant *rethrowFn);
584  void exit(CodeGenFunction &CGF);
585  };
586 
587  /// Returns true inside SEH __try blocks.
588  bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
589 
590  /// Returns true while emitting a cleanuppad.
591  bool isCleanupPadScope() const {
592  return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
593  }
594 
595  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
596  /// current full-expression. Safe against the possibility that
597  /// we're currently inside a conditionally-evaluated expression.
598  template <class T, class... As>
599  void pushFullExprCleanup(CleanupKind kind, As... A) {
600  // If we're not in a conditional branch, or if none of the
601  // arguments requires saving, then use the unconditional cleanup.
602  if (!isInConditionalBranch())
603  return EHStack.pushCleanup<T>(kind, A...);
604 
605  // Stash values in a tuple so we can guarantee the order of saves.
606  typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
607  SavedTuple Saved{saveValueInCond(A)...};
608 
609  typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
610  EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
611  initFullExprCleanup();
612  }
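 // Illustrative sketch (not part of this header): pushing the CallLifetimeEnd
 // cleanup declared above; Addr and SizeV are hypothetical locals:
 //
 //   CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
 //       NormalEHLifetimeMarker, Addr, SizeV);
 //
 // Inside a conditional branch the arguments are first saved with
 // DominatingValue<>::save and reloaded when the cleanup eventually runs.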
613 
614  /// Queue a cleanup to be pushed after finishing the current
615  /// full-expression.
616  template <class T, class... As>
617  void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
618  if (!isInConditionalBranch())
619  return pushCleanupAfterFullExprImpl<T>(Kind, Address::invalid(), A...);
620 
621  Address ActiveFlag = createCleanupActiveFlag();
622  assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
623  "cleanup active flag should never need saving");
624 
625  typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
626  SavedTuple Saved{saveValueInCond(A)...};
627 
628  typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
629  pushCleanupAfterFullExprImpl<CleanupType>(Kind, ActiveFlag, Saved);
630  }
631 
632  template <class T, class... As>
633  void pushCleanupAfterFullExprImpl(CleanupKind Kind, Address ActiveFlag,
634  As... A) {
635  LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
636  ActiveFlag.isValid()};
637 
638  size_t OldSize = LifetimeExtendedCleanupStack.size();
639  LifetimeExtendedCleanupStack.resize(
640  LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
641  (Header.IsConditional ? sizeof(ActiveFlag) : 0));
642 
643  static_assert(sizeof(Header) % alignof(T) == 0,
644  "Cleanup will be allocated on misaligned address");
645  char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
646  new (Buffer) LifetimeExtendedCleanupHeader(Header);
647  new (Buffer + sizeof(Header)) T(A...);
648  if (Header.IsConditional)
649  new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag);
650  }
651 
652  /// Set up the last cleanup that was pushed as a conditional
653  /// full-expression cleanup.
654  void initFullExprCleanup() {
655  initFullExprCleanupWithFlag(createCleanupActiveFlag());
656  }
657 
658  void initFullExprCleanupWithFlag(Address ActiveFlag);
659  Address createCleanupActiveFlag();
660 
661  /// PushDestructorCleanup - Push a cleanup to call the
662  /// complete-object destructor of an object of the given type at the
663  /// given address. Does nothing if T is not a C++ class type with a
664  /// non-trivial destructor.
665  void PushDestructorCleanup(QualType T, Address Addr);
666 
667  /// PushDestructorCleanup - Push a cleanup to call the
668  /// complete-object variant of the given destructor on the object at
669  /// the given address.
670  void PushDestructorCleanup(const CXXDestructorDecl *Dtor, Address Addr);
671 
672  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
673  /// process all branch fixups.
674  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
675 
676  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
677  /// The block cannot be reactivated. Pops it if it's the top of the
678  /// stack.
679  ///
680  /// \param DominatingIP - An instruction which is known to
681  /// dominate the current IP (if set) and which lies along
682  /// all paths of execution between the current IP and the
683  /// point at which the cleanup comes into scope.
684  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
685  llvm::Instruction *DominatingIP);
686 
687  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
688  /// Cannot be used to resurrect a deactivated cleanup.
689  ///
690  /// \param DominatingIP - An instruction which is known to
691  /// dominate the current IP (if set) and which lies along
692  /// all paths of execution between the current IP and the
693  /// point at which the cleanup comes into scope.
694  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
695  llvm::Instruction *DominatingIP);
696 
697  /// Enters a new scope for capturing cleanups, all of which
698  /// will be executed once the scope is exited.
699  class RunCleanupsScope {
700  EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
701  size_t LifetimeExtendedCleanupStackSize;
702  bool OldDidCallStackSave;
703  protected:
704  bool PerformCleanup;
705  private:
706 
707  RunCleanupsScope(const RunCleanupsScope &) = delete;
708  void operator=(const RunCleanupsScope &) = delete;
709 
710  protected:
711  CodeGenFunction& CGF;
712 
713  public:
714  /// Enter a new cleanup scope.
715  explicit RunCleanupsScope(CodeGenFunction &CGF)
716  : PerformCleanup(true), CGF(CGF)
717  {
718  CleanupStackDepth = CGF.EHStack.stable_begin();
719  LifetimeExtendedCleanupStackSize =
720  CGF.LifetimeExtendedCleanupStack.size();
721  OldDidCallStackSave = CGF.DidCallStackSave;
722  CGF.DidCallStackSave = false;
723  OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
724  CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
725  }
726 
727  /// Exit this cleanup scope, emitting any accumulated cleanups.
728  ~RunCleanupsScope() {
729  if (PerformCleanup)
730  ForceCleanup();
731  }
732 
733  /// Determine whether this scope requires any cleanups.
734  bool requiresCleanups() const {
735  return CGF.EHStack.stable_begin() != CleanupStackDepth;
736  }
737 
738  /// Force the emission of cleanups now, instead of waiting
739  /// until this object is destroyed.
740  /// \param ValuesToReload - A list of values that need to be available at
741  /// the insertion point after cleanup emission. If cleanup emission created
742  /// a shared cleanup block, these value pointers will be rewritten.
743  /// Otherwise, they will not be modified.
744  void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
745  assert(PerformCleanup && "Already forced cleanup");
746  CGF.DidCallStackSave = OldDidCallStackSave;
747  CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
748  ValuesToReload);
749  PerformCleanup = false;
750  CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
751  }
752  };
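 // Illustrative sketch (not part of this header): emitting a statement inside
 // its own cleanup scope; Body stands for a hypothetical const Stmt*:
 //
 //   {
 //     CodeGenFunction::RunCleanupsScope Scope(CGF);
 //     CGF.EmitStmt(Body);   // cleanups pushed while emitting Body
 //   }                       // ~RunCleanupsScope() runs ForceCleanup()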
753 
754  // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
755  EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
756  EHScopeStack::stable_end();
757 
758  class LexicalScope : public RunCleanupsScope {
759  SourceRange Range;
760  SmallVector<const LabelDecl*, 4> Labels;
761  LexicalScope *ParentScope;
762 
763  LexicalScope(const LexicalScope &) = delete;
764  void operator=(const LexicalScope &) = delete;
765 
766  public:
767  /// Enter a new cleanup scope.
768  explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
769  : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
770  CGF.CurLexicalScope = this;
771  if (CGDebugInfo *DI = CGF.getDebugInfo())
772  DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
773  }
774 
775  void addLabel(const LabelDecl *label) {
776  assert(PerformCleanup && "adding label to dead scope?");
777  Labels.push_back(label);
778  }
779 
780  /// Exit this cleanup scope, emitting any accumulated
781  /// cleanups.
782  ~LexicalScope() {
783  if (CGDebugInfo *DI = CGF.getDebugInfo())
784  DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
785 
786  // If we should perform a cleanup, force them now. Note that
787  // this ends the cleanup scope before rescoping any labels.
788  if (PerformCleanup) {
789  ApplyDebugLocation DL(CGF, Range.getEnd());
790  ForceCleanup();
791  }
792  }
793 
794  /// Force the emission of cleanups now, instead of waiting
795  /// until this object is destroyed.
796  void ForceCleanup() {
797  CGF.CurLexicalScope = ParentScope;
798  RunCleanupsScope::ForceCleanup();
799 
800  if (!Labels.empty())
801  rescopeLabels();
802  }
803 
804  bool hasLabels() const {
805  return !Labels.empty();
806  }
807 
808  void rescopeLabels();
809  };
810 
811  typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
812 
813  /// The class used to assign some variables some temporarily addresses.
814  class OMPMapVars {
815  DeclMapTy SavedLocals;
816  DeclMapTy SavedTempAddresses;
817  OMPMapVars(const OMPMapVars &) = delete;
818  void operator=(const OMPMapVars &) = delete;
819 
820  public:
821  explicit OMPMapVars() = default;
822  ~OMPMapVars() {
823  assert(SavedLocals.empty() && "Did not restore original addresses.");
824  };
825 
826  /// Sets the address of the variable \p LocalVD to be \p TempAddr in
827  /// function \p CGF.
828  /// \return true if at least one variable was set already, false otherwise.
829  bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
830  Address TempAddr) {
831  LocalVD = LocalVD->getCanonicalDecl();
832  // Only save it once.
833  if (SavedLocals.count(LocalVD)) return false;
834 
835  // Copy the existing local entry to SavedLocals.
836  auto it = CGF.LocalDeclMap.find(LocalVD);
837  if (it != CGF.LocalDeclMap.end())
838  SavedLocals.try_emplace(LocalVD, it->second);
839  else
840  SavedLocals.try_emplace(LocalVD, Address::invalid());
841 
842  // Generate the private entry.
843  QualType VarTy = LocalVD->getType();
844  if (VarTy->isReferenceType()) {
845  Address Temp = CGF.CreateMemTemp(VarTy);
846  CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
847  TempAddr = Temp;
848  }
849  SavedTempAddresses.try_emplace(LocalVD, TempAddr);
850 
851  return true;
852  }
853 
854  /// Applies new addresses to the list of the variables.
855  /// \return true if at least one variable is using new address, false
856  /// otherwise.
857  bool apply(CodeGenFunction &CGF) {
858  copyInto(SavedTempAddresses, CGF.LocalDeclMap);
859  SavedTempAddresses.clear();
860  return !SavedLocals.empty();
861  }
862 
863  /// Restores original addresses of the variables.
864  void restore(CodeGenFunction &CGF) {
865  if (!SavedLocals.empty()) {
866  copyInto(SavedLocals, CGF.LocalDeclMap);
867  SavedLocals.clear();
868  }
869  }
870 
871  private:
872  /// Copy all the entries in the source map over the corresponding
873  /// entries in the destination, which must exist.
874  static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
875  for (auto &Pair : Src) {
876  if (!Pair.second.isValid()) {
877  Dest.erase(Pair.first);
878  continue;
879  }
880 
881  auto I = Dest.find(Pair.first);
882  if (I != Dest.end())
883  I->second = Pair.second;
884  else
885  Dest.insert(Pair);
886  }
887  }
888  };
889 
890  /// The scope used to remap some variables as private in the OpenMP loop body
891  /// (or other captured region emitted without outlining), and to restore old
892  /// vars back on exit.
893  class OMPPrivateScope : public RunCleanupsScope {
894  OMPMapVars MappedVars;
895  OMPPrivateScope(const OMPPrivateScope &) = delete;
896  void operator=(const OMPPrivateScope &) = delete;
897 
898  public:
899  /// Enter a new OpenMP private scope.
900  explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
901 
902  /// Registers \p LocalVD variable as a private and apply \p PrivateGen
903  /// function for it to generate corresponding private variable. \p
904  /// PrivateGen returns an address of the generated private variable.
905  /// \return true if the variable is registered as private, false if it has
906  /// been privatized already.
907  bool addPrivate(const VarDecl *LocalVD,
908  const llvm::function_ref<Address()> PrivateGen) {
909  assert(PerformCleanup && "adding private to dead scope");
910  return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
911  }
912 
913  /// Privatizes local variables previously registered as private.
914  /// Registration is separate from the actual privatization to allow
915  /// initializers to use values of the original variables, not the private ones.
916  /// This is important, for example, if the private variable is a class
917  /// variable initialized by a constructor that references other private
918  /// variables. But at initialization original variables must be used, not
919  /// private copies.
920  /// \return true if at least one variable was privatized, false otherwise.
921  bool Privatize() { return MappedVars.apply(CGF); }
922 
923  void ForceCleanup() {
924  RunCleanupsScope::ForceCleanup();
925  MappedVars.restore(CGF);
926  }
927 
928  /// Exit scope - all the mapped variables are restored.
929  ~OMPPrivateScope() {
930  if (PerformCleanup)
931  ForceCleanup();
932  }
933 
934  /// Checks if the global variable is captured in the current function.
935  bool isGlobalVarCaptured(const VarDecl *VD) const {
936  VD = VD->getCanonicalDecl();
937  return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
938  }
939  };
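 // Illustrative sketch (not part of this header): privatizing a variable for
 // an OpenMP region; VD stands for a hypothetical const VarDecl*:
 //
 //   CodeGenFunction::OMPPrivateScope PrivScope(CGF);
 //   PrivScope.addPrivate(VD, [&CGF, VD]() -> Address {
 //     return CGF.CreateMemTemp(VD->getType(), "omp.private");
 //   });
 //   (void)PrivScope.Privatize();   // from here on VD maps to the copy
 //   ...                            // emit the region body
 //                                  // scope exit restores the original map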
940 
941  /// Takes the old cleanup stack size and emits the cleanup blocks
942  /// that have been added.
943  void
944  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
945  std::initializer_list<llvm::Value **> ValuesToReload = {});
946 
947  /// Takes the old cleanup stack size and emits the cleanup blocks
948  /// that have been added, then adds all lifetime-extended cleanups from
949  /// the given position to the stack.
950  void
951  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
952  size_t OldLifetimeExtendedStackSize,
953  std::initializer_list<llvm::Value **> ValuesToReload = {});
954 
955  void ResolveBranchFixups(llvm::BasicBlock *Target);
956 
957  /// The given basic block lies in the current EH scope, but may be a
958  /// target of a potentially scope-crossing jump; get a stable handle
959  /// to which we can perform this jump later.
960  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
961  return JumpDest(Target,
962  EHStack.getInnermostNormalCleanup(),
963  NextCleanupDestIndex++);
964  }
965 
966  /// The given basic block lies in the current EH scope, but may be a
967  /// target of a potentially scope-crossing jump; get a stable handle
968  /// to which we can perform this jump later.
969  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
970  return getJumpDestInCurrentScope(createBasicBlock(Name));
971  }
972 
973  /// EmitBranchThroughCleanup - Emit a branch from the current insert
974  /// block through the normal cleanup handling code (if any) and then
975  /// on to \arg Dest.
976  void EmitBranchThroughCleanup(JumpDest Dest);
977 
978  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
979  /// specified destination obviously has no cleanups to run. 'false' is always
980  /// a conservatively correct answer for this method.
981  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
982 
983  /// popCatchScope - Pops the catch scope at the top of the EHScope
984  /// stack, emitting any required code (other than the catch handlers
985  /// themselves).
986  void popCatchScope();
987 
988  llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
989  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
990  llvm::BasicBlock *
991  getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
992 
993  /// An object to manage conditionally-evaluated expressions.
994  class ConditionalEvaluation {
995  llvm::BasicBlock *StartBB;
996 
997  public:
998  ConditionalEvaluation(CodeGenFunction &CGF)
999  : StartBB(CGF.Builder.GetInsertBlock()) {}
1000 
1001  void begin(CodeGenFunction &CGF) {
1002  assert(CGF.OutermostConditional != this);
1003  if (!CGF.OutermostConditional)
1004  CGF.OutermostConditional = this;
1005  }
1006 
1007  void end(CodeGenFunction &CGF) {
1008  assert(CGF.OutermostConditional != nullptr);
1009  if (CGF.OutermostConditional == this)
1010  CGF.OutermostConditional = nullptr;
1011  }
1012 
1013  /// Returns a block which will be executed prior to each
1014  /// evaluation of the conditional code.
1015  llvm::BasicBlock *getStartingBlock() const {
1016  return StartBB;
1017  }
1018  };
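 // Illustrative sketch (not part of this header): bracketing the emission of
 // a conditionally-executed operand so that cleanups pushed inside it are
 // treated as conditional:
 //
 //   CodeGenFunction::ConditionalEvaluation Eval(CGF);
 //   Eval.begin(CGF);
 //   // ... emit one arm of the conditional ...
 //   Eval.end(CGF);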
1019 
1020  /// isInConditionalBranch - Return true if we're currently emitting
1021  /// one branch or the other of a conditional expression.
1022  bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1023 
1024  void setBeforeOutermostConditional(llvm::Value *value, Address addr) {
1025  assert(isInConditionalBranch());
1026  llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
1027  auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
1028  store->setAlignment(addr.getAlignment().getQuantity());
1029  }
1030 
1031  /// An RAII object to record that we're evaluating a statement
1032  /// expression.
1033  class StmtExprEvaluation {
1034  CodeGenFunction &CGF;
1035 
1036  /// We have to save the outermost conditional: cleanups in a
1037  /// statement expression aren't conditional just because the
1038  /// StmtExpr is.
1039  ConditionalEvaluation *SavedOutermostConditional;
1040 
1041  public:
1042  StmtExprEvaluation(CodeGenFunction &CGF)
1043  : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1044  CGF.OutermostConditional = nullptr;
1045  }
1046 
1047  ~StmtExprEvaluation() {
1048  CGF.OutermostConditional = SavedOutermostConditional;
1049  CGF.EnsureInsertPoint();
1050  }
1051  };
1052 
1053  /// An object which temporarily prevents a value from being
1054  /// destroyed by aggressive peephole optimizations that assume that
1055  /// all uses of a value have been realized in the IR.
1056  class PeepholeProtection {
1057  llvm::Instruction *Inst;
1058  friend class CodeGenFunction;
1059 
1060  public:
1061  PeepholeProtection() : Inst(nullptr) {}
1062  };
1063 
1064  /// A non-RAII class containing all the information about a bound
1065  /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1066  /// this which makes individual mappings very simple; using this
1067  /// class directly is useful when you have a variable number of
1068  /// opaque values or don't want the RAII functionality for some
1069  /// reason.
1070  class OpaqueValueMappingData {
1071  const OpaqueValueExpr *OpaqueValue;
1072  bool BoundLValue;
1073  CodeGenFunction::PeepholeProtection Protection;
1074 
1075  OpaqueValueMappingData(const OpaqueValueExpr *ov,
1076  bool boundLValue)
1077  : OpaqueValue(ov), BoundLValue(boundLValue) {}
1078  public:
1079  OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1080 
1081  static bool shouldBindAsLValue(const Expr *expr) {
1082  // gl-values should be bound as l-values for obvious reasons.
1083  // Records should be bound as l-values because IR generation
1084  // always keeps them in memory. Expressions of function type
1085  // act exactly like l-values but are formally required to be
1086  // r-values in C.
1087  return expr->isGLValue() ||
1088  expr->getType()->isFunctionType() ||
1089  hasAggregateEvaluationKind(expr->getType());
1090  }
1091 
1092  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1093  const OpaqueValueExpr *ov,
1094  const Expr *e) {
1095  if (shouldBindAsLValue(ov))
1096  return bind(CGF, ov, CGF.EmitLValue(e));
1097  return bind(CGF, ov, CGF.EmitAnyExpr(e));
1098  }
1099 
1100  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1101  const OpaqueValueExpr *ov,
1102  const LValue &lv) {
1103  assert(shouldBindAsLValue(ov));
1104  CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1105  return OpaqueValueMappingData(ov, true);
1106  }
1107 
1108  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1109  const OpaqueValueExpr *ov,
1110  const RValue &rv) {
1111  assert(!shouldBindAsLValue(ov));
1112  CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1113 
1114  OpaqueValueMappingData data(ov, false);
1115 
1116  // Work around an extremely aggressive peephole optimization in
1117  // EmitScalarConversion which assumes that all other uses of a
1118  // value are extant.
1119  data.Protection = CGF.protectFromPeepholes(rv);
1120 
1121  return data;
1122  }
1123 
1124  bool isValid() const { return OpaqueValue != nullptr; }
1125  void clear() { OpaqueValue = nullptr; }
1126 
1127  void unbind(CodeGenFunction &CGF) {
1128  assert(OpaqueValue && "no data to unbind!");
1129 
1130  if (BoundLValue) {
1131  CGF.OpaqueLValues.erase(OpaqueValue);
1132  } else {
1133  CGF.OpaqueRValues.erase(OpaqueValue);
1134  CGF.unprotectFromPeepholes(Protection);
1135  }
1136  }
1137  };
1138 
1139  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1140  class OpaqueValueMapping {
1141  CodeGenFunction &CGF;
1142  OpaqueValueMappingData Data;
1143 
1144  public:
1145  static bool shouldBindAsLValue(const Expr *expr) {
1146  return OpaqueValueMappingData::shouldBindAsLValue(expr);
1147  }
1148 
1149  /// Build the opaque value mapping for the given conditional
1150  /// operator if it's the GNU ?: extension. This is a common
1151  /// enough pattern that the convenience operator is really
1152  /// helpful.
1153  ///
1154  OpaqueValueMapping(CodeGenFunction &CGF,
1155  const AbstractConditionalOperator *op) : CGF(CGF) {
1156  if (isa<ConditionalOperator>(op))
1157  // Leave Data empty.
1158  return;
1159 
1160  const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1161  Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1162  e->getCommon());
1163  }
1164 
1165  /// Build the opaque value mapping for an OpaqueValueExpr whose source
1166  /// expression is set to the expression the OVE represents.
1167  OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
1168  : CGF(CGF) {
1169  if (OV) {
1170  assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1171  "for OVE with no source expression");
1172  Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
1173  }
1174  }
1175 
1176  OpaqueValueMapping(CodeGenFunction &CGF,
1177  const OpaqueValueExpr *opaqueValue,
1178  LValue lvalue)
1179  : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1180  }
1181 
1182  OpaqueValueMapping(CodeGenFunction &CGF,
1183  const OpaqueValueExpr *opaqueValue,
1184  RValue rvalue)
1185  : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1186  }
1187 
1188  void pop() {
1189  Data.unbind(CGF);
1190  Data.clear();
1191  }
1192 
1193  ~OpaqueValueMapping() {
1194  if (Data.isValid()) Data.unbind(CGF);
1195  }
1196  };
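 // Illustrative sketch (not part of this header): binding the opaque value of
 // a GNU "?:" conditional; E stands for a hypothetical
 // const AbstractConditionalOperator*:
 //
 //   CodeGenFunction::OpaqueValueMapping Binding(CGF, E);
 //   // ... emit the conditional; uses of the OVE see the bound value ...
 //   // the mapping is removed when Binding goes out of scope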
1197 
1198 private:
1199  CGDebugInfo *DebugInfo;
1200  /// Used to create unique names for artificial VLA size debug info variables.
1201  unsigned VLAExprCounter = 0;
1202  bool DisableDebugInfo = false;
1203 
1204  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1205  /// calling llvm.stacksave for multiple VLAs in the same scope.
1206  bool DidCallStackSave = false;
1207 
1208  /// IndirectBranch - The first time an indirect goto is seen we create a block
1209  /// with an indirect branch. Every time we see the address of a label taken,
1210  /// we add the label to the indirect goto. Every subsequent indirect goto is
1211  /// codegen'd as a jump to the IndirectBranch's basic block.
1212  llvm::IndirectBrInst *IndirectBranch = nullptr;
1213 
1214  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1215  /// decls.
1216  DeclMapTy LocalDeclMap;
1217 
1218  // Keep track of the cleanups for callee-destructed parameters pushed to the
1219  // cleanup stack so that they can be deactivated later.
1220  llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1221  CalleeDestructedParamCleanups;
1222 
1223  /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1224  /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1225  /// parameter.
1226  llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1227  SizeArguments;
1228 
1229  /// Track escaped local variables with auto storage. Used during SEH
1230  /// outlining to produce a call to llvm.localescape.
1231  llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1232 
1233  /// LabelMap - This keeps track of the LLVM basic block for each C label.
1234  llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1235 
1236  // BreakContinueStack - This keeps track of where break and continue
1237  // statements should jump to.
1238  struct BreakContinue {
1239  BreakContinue(JumpDest Break, JumpDest Continue)
1240  : BreakBlock(Break), ContinueBlock(Continue) {}
1241 
1242  JumpDest BreakBlock;
1243  JumpDest ContinueBlock;
1244  };
1245  SmallVector<BreakContinue, 8> BreakContinueStack;
1246 
1247  /// Handles cancellation exit points in OpenMP-related constructs.
1248  class OpenMPCancelExitStack {
1249  /// Tracks cancellation exit point and join point for cancel-related exit
1250  /// and normal exit.
1251  struct CancelExit {
1252  CancelExit() = default;
1253  CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1254  JumpDest ContBlock)
1255  : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1256  OpenMPDirectiveKind Kind = OMPD_unknown;
1257  /// true if the exit block has been emitted already by the special
1258  /// emitExit() call, false if the default codegen is used.
1259  bool HasBeenEmitted = false;
1260  JumpDest ExitBlock;
1261  JumpDest ContBlock;
1262  };
1263 
1264  SmallVector<CancelExit, 8> Stack;
1265 
1266  public:
1267  OpenMPCancelExitStack() : Stack(1) {}
1268  ~OpenMPCancelExitStack() = default;
1269  /// Fetches the exit block for the current OpenMP construct.
1270  JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1271  /// Emits exit block with special codegen procedure specific for the related
1272  /// OpenMP construct + emits code for normal construct cleanup.
1273  void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1274  const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1275  if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1276  assert(CGF.getOMPCancelDestination(Kind).isValid());
1277  assert(CGF.HaveInsertPoint());
1278  assert(!Stack.back().HasBeenEmitted);
1279  auto IP = CGF.Builder.saveAndClearIP();
1280  CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1281  CodeGen(CGF);
1282  CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1283  CGF.Builder.restoreIP(IP);
1284  Stack.back().HasBeenEmitted = true;
1285  }
1286  CodeGen(CGF);
1287  }
1288  /// Enter the cancel supporting \a Kind construct.
1289  /// \param Kind OpenMP directive that supports cancel constructs.
1290  /// \param HasCancel true, if the construct has inner cancel directive,
1291  /// false otherwise.
1292  void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
1293  Stack.push_back({Kind,
1294  HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
1295  : JumpDest(),
1296  HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
1297  : JumpDest()});
1298  }
1299  /// Emits default exit point for the cancel construct (if the special one
1300  /// has not been used) + join point for cancel/normal exits.
1301  void exit(CodeGenFunction &CGF) {
1302  if (getExitBlock().isValid()) {
1303  assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
1304  bool HaveIP = CGF.HaveInsertPoint();
1305  if (!Stack.back().HasBeenEmitted) {
1306  if (HaveIP)
1307  CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1308  CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1309  CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1310  }
1311  CGF.EmitBlock(Stack.back().ContBlock.getBlock());
1312  if (!HaveIP) {
1313  CGF.Builder.CreateUnreachable();
1314  CGF.Builder.ClearInsertionPoint();
1315  }
1316  }
1317  Stack.pop_back();
1318  }
1319  };
1320  OpenMPCancelExitStack OMPCancelStack;
1321 
1322  CodeGenPGO PGO;
1323 
1324  /// Calculate branch weights appropriate for PGO data
1325  llvm::MDNode *createProfileWeights(uint64_t TrueCount, uint64_t FalseCount);
1326  llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights);
1327  llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1328  uint64_t LoopCount);
1329 
1330 public:
1331  /// Increment the profiler's counter for the given statement by \p StepV.
1332  /// If \p StepV is null, the default increment is 1.
1333  void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1334  if (CGM.getCodeGenOpts().hasProfileClangInstr())
1335  PGO.emitCounterIncrement(Builder, S, StepV);
1336  PGO.setCurrentStmt(S);
1337  }
1338 
1339  /// Get the profiler's count for the given statement.
1340  uint64_t getProfileCount(const Stmt *S) {
1341  Optional<uint64_t> Count = PGO.getStmtCount(S);
1342  if (!Count.hasValue())
1343  return 0;
1344  return *Count;
1345  }
1346 
1347  /// Set the profiler's current count.
1348  void setCurrentProfileCount(uint64_t Count) {
1349  PGO.setCurrentRegionCount(Count);
1350  }
1351 
1352  /// Get the profiler's current count. This is generally the count for the most
1353  /// recently incremented counter.
1354  uint64_t getCurrentProfileCount() {
1355  return PGO.getCurrentRegionCount();
1356  }
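 // Illustrative sketch (not part of this header): statement emitters bump the
 // counter for the statement they enter and may consult the recorded count
 // when computing branch weights; S is a hypothetical const Stmt*:
 //
 //   CGF.incrementProfileCounter(S);
 //   uint64_t Count = CGF.getProfileCount(S);   // 0 when no profile data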
1357 
1358 private:
1359 
1360  /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
1361  /// if the current context is not in a switch.
1362  llvm::SwitchInst *SwitchInsn = nullptr;
1363  /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1364  SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1365 
1366  /// CaseRangeBlock - This block holds the if-condition check for the last case
1367  /// statement range in the current switch instruction.
1368  llvm::BasicBlock *CaseRangeBlock = nullptr;
1369 
1370  /// OpaqueLValues - Keeps track of the current set of opaque value
1371  /// expressions.
1372  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1373  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1374 
1375  // VLASizeMap - This keeps track of the associated size for each VLA type.
1376  // We track this by the size expression rather than the type itself because
1377  // in certain situations, like a const qualifier applied to a VLA typedef,
1378  // multiple VLA types can share the same size expression.
1379  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1380  // enter/leave scopes.
1381  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1382 
1383  /// A block containing a single 'unreachable' instruction. Created
1384  /// lazily by getUnreachableBlock().
1385  llvm::BasicBlock *UnreachableBlock = nullptr;
1386 
1387  /// Count of the number of return expressions in the function.
1388  unsigned NumReturnExprs = 0;
1389 
1390  /// Count the number of simple (constant) return expressions in the function.
1391  unsigned NumSimpleReturnExprs = 0;
1392 
1393  /// The last regular (non-return) debug location (breakpoint) in the function.
1394  SourceLocation LastStopPoint;
1395 
1396 public:
1397  /// A scope within which we are constructing the fields of an object which
1398  /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1399  /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1400  class FieldConstructionScope {
1401  public:
1402  FieldConstructionScope(CodeGenFunction &CGF, Address This)
1403  : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1404  CGF.CXXDefaultInitExprThis = This;
1405  }
1406  ~FieldConstructionScope() {
1407  CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1408  }
1409 
1410  private:
1411  CodeGenFunction &CGF;
1412  Address OldCXXDefaultInitExprThis;
1413  };
1414 
1415  /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1416  /// is overridden to be the object under construction.
1417  class CXXDefaultInitExprScope {
1418  public:
1419  CXXDefaultInitExprScope(CodeGenFunction &CGF)
1420  : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1421  OldCXXThisAlignment(CGF.CXXThisAlignment) {
1422  CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
1423  CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1424  }
1425  ~CXXDefaultInitExprScope() {
1426  CGF.CXXThisValue = OldCXXThisValue;
1427  CGF.CXXThisAlignment = OldCXXThisAlignment;
1428  }
1429 
1430  public:
1431  CodeGenFunction &CGF;
1432  llvm::Value *OldCXXThisValue;
1433  CharUnits OldCXXThisAlignment;
1434  };
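 // Illustrative sketch (not part of this header): while emitting a
 // constructor's member initializers, a FieldConstructionScope records the
 // object under construction (ThisAddr is a hypothetical Address); when a
 // CXXDefaultInitExpr is evaluated, a CXXDefaultInitExprScope temporarily
 // redirects 'this' to that object:
 //
 //   CodeGenFunction::FieldConstructionScope FCS(CGF, ThisAddr);
 //   ...
 //   CodeGenFunction::CXXDefaultInitExprScope DIS(CGF);
 //   // ... emit the default member initializer ...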
1435 
1436  /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1437  /// current loop index is overridden.
1438  class ArrayInitLoopExprScope {
1439  public:
1440  ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1441  : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1442  CGF.ArrayInitIndex = Index;
1443  }
1444  ~ArrayInitLoopExprScope() {
1445  CGF.ArrayInitIndex = OldArrayInitIndex;
1446  }
1447 
1448  private:
1449  CodeGenFunction &CGF;
1450  llvm::Value *OldArrayInitIndex;
1451  };
1452 
1453  class InlinedInheritingConstructorScope {
1454  public:
1455  InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
1456  : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1457  OldCurCodeDecl(CGF.CurCodeDecl),
1458  OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1459  OldCXXABIThisValue(CGF.CXXABIThisValue),
1460  OldCXXThisValue(CGF.CXXThisValue),
1461  OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1462  OldCXXThisAlignment(CGF.CXXThisAlignment),
1463  OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1464  OldCXXInheritedCtorInitExprArgs(
1465  std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1466  CGF.CurGD = GD;
1467  CGF.CurFuncDecl = CGF.CurCodeDecl =
1468  cast<CXXConstructorDecl>(GD.getDecl());
1469  CGF.CXXABIThisDecl = nullptr;
1470  CGF.CXXABIThisValue = nullptr;
1471  CGF.CXXThisValue = nullptr;
1472  CGF.CXXABIThisAlignment = CharUnits();
1473  CGF.CXXThisAlignment = CharUnits();
1474  CGF.ReturnValue = Address::invalid();
1475  CGF.FnRetTy = QualType();
1476  CGF.CXXInheritedCtorInitExprArgs.clear();
1477  }
1478  ~InlinedInheritingConstructorScope() {
1479  CGF.CurGD = OldCurGD;
1480  CGF.CurFuncDecl = OldCurFuncDecl;
1481  CGF.CurCodeDecl = OldCurCodeDecl;
1482  CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1483  CGF.CXXABIThisValue = OldCXXABIThisValue;
1484  CGF.CXXThisValue = OldCXXThisValue;
1485  CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1486  CGF.CXXThisAlignment = OldCXXThisAlignment;
1487  CGF.ReturnValue = OldReturnValue;
1488  CGF.FnRetTy = OldFnRetTy;
1489  CGF.CXXInheritedCtorInitExprArgs =
1490  std::move(OldCXXInheritedCtorInitExprArgs);
1491  }
1492 
1493  private:
1494  CodeGenFunction &CGF;
1495  GlobalDecl OldCurGD;
1496  const Decl *OldCurFuncDecl;
1497  const Decl *OldCurCodeDecl;
1498  ImplicitParamDecl *OldCXXABIThisDecl;
1499  llvm::Value *OldCXXABIThisValue;
1500  llvm::Value *OldCXXThisValue;
1501  CharUnits OldCXXABIThisAlignment;
1502  CharUnits OldCXXThisAlignment;
1503  Address OldReturnValue;
1504  QualType OldFnRetTy;
1505  CallArgList OldCXXInheritedCtorInitExprArgs;
1506  };
1507 
1508 private:
1509  /// CXXThisDecl - When generating code for a C++ member function,
1510  /// this will hold the implicit 'this' declaration.
1511  ImplicitParamDecl *CXXABIThisDecl = nullptr;
1512  llvm::Value *CXXABIThisValue = nullptr;
1513  llvm::Value *CXXThisValue = nullptr;
1514  CharUnits CXXABIThisAlignment;
1515  CharUnits CXXThisAlignment;
1516 
1517  /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
1518  /// this expression.
1519  Address CXXDefaultInitExprThis = Address::invalid();
1520 
1521  /// The current array initialization index when evaluating an
1522  /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
1523  llvm::Value *ArrayInitIndex = nullptr;
1524 
1525  /// The values of function arguments to use when evaluating
1526  /// CXXInheritedCtorInitExprs within this context.
1527  CallArgList CXXInheritedCtorInitExprArgs;
1528 
1529  /// CXXStructorImplicitParamDecl - When generating code for a constructor or
1530  /// destructor, this will hold the implicit argument (e.g. VTT).
1531  ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
1532  llvm::Value *CXXStructorImplicitParamValue = nullptr;
1533 
1534  /// OutermostConditional - Points to the outermost active
1535  /// conditional control. This is used so that we know if a
1536  /// temporary should be destroyed conditionally.
1537  ConditionalEvaluation *OutermostConditional = nullptr;
1538 
1539  /// The current lexical scope.
1540  LexicalScope *CurLexicalScope = nullptr;
1541 
1542  /// The current source location that should be used for exception
1543  /// handling code.
1544  SourceLocation CurEHLocation;
1545 
1546  /// BlockByrefInfos - For each __block variable, contains
1547  /// information about the layout of the variable.
1548  llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
1549 
1550  /// Used by -fsanitize=nullability-return to determine whether the return
1551  /// value can be checked.
1552  llvm::Value *RetValNullabilityPrecondition = nullptr;
1553 
1554  /// Check if -fsanitize=nullability-return instrumentation is required for
1555  /// this function.
1556  bool requiresReturnValueNullabilityCheck() const {
1557  return RetValNullabilityPrecondition;
1558  }
1559 
1560  /// Used to store precise source locations for return statements by the
1561  /// runtime return value checks.
1562  Address ReturnLocation = Address::invalid();
1563 
1564  /// Check if the return value of this function requires sanitization.
1565  bool requiresReturnValueCheck() const {
1566  return requiresReturnValueNullabilityCheck() ||
1567  (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
1568  CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>());
1569  }
1570 
1571  llvm::BasicBlock *TerminateLandingPad = nullptr;
1572  llvm::BasicBlock *TerminateHandler = nullptr;
1573  llvm::BasicBlock *TrapBB = nullptr;
1574 
1575  /// Terminate funclets keyed by parent funclet pad.
1576  llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
1577 
1578  /// Largest vector width used in this function. Will be used to create a
1579  /// function attribute.
1580  unsigned LargestVectorWidth = 0;
1581 
1582  /// True if we need to emit lifetime markers.
1583  const bool ShouldEmitLifetimeMarkers;
1584 
1585  /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
1586  /// the function metadata.
1587  void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
1588  llvm::Function *Fn);
1589 
1590 public:
1591  CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
1592  ~CodeGenFunction();
1593 
1594  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1595  ASTContext &getContext() const { return CGM.getContext(); }
1596  CGDebugInfo *getDebugInfo() {
1597  if (DisableDebugInfo)
1598  return nullptr;
1599  return DebugInfo;
1600  }
1601  void disableDebugInfo() { DisableDebugInfo = true; }
1602  void enableDebugInfo() { DisableDebugInfo = false; }
1603 
1604  bool shouldUseFusedARCCalls() {
1605  return CGM.getCodeGenOpts().OptimizationLevel == 0;
1606  }
1607 
1608  const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
1609 
1610  /// Returns a pointer to the function's exception object and selector slot,
1611  /// which is assigned in every landing pad.
1612  Address getExceptionSlot();
1613  Address getEHSelectorSlot();
1614 
1615  /// Returns the contents of the function's exception object and selector
1616  /// slots.
1617  llvm::Value *getExceptionFromSlot();
1618  llvm::Value *getSelectorFromSlot();
1619 
1620  Address getNormalCleanupDestSlot();
1621 
1622  llvm::BasicBlock *getUnreachableBlock() {
1623  if (!UnreachableBlock) {
1624  UnreachableBlock = createBasicBlock("unreachable");
1625  new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1626  }
1627  return UnreachableBlock;
1628  }
1629 
1630  llvm::BasicBlock *getInvokeDest() {
1631  if (!EHStack.requiresLandingPad()) return nullptr;
1632  return getInvokeDestImpl();
1633  }
1634 
1635  bool currentFunctionUsesSEHTry() const { return CurSEHParent != nullptr; }
1636 
1637  const TargetInfo &getTarget() const { return Target; }
1638  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1639  const TargetCodeGenInfo &getTargetHooks() const {
1640  return CGM.getTargetCodeGenInfo();
1641  }
1642 
1643  //===--------------------------------------------------------------------===//
1644  // Cleanups
1645  //===--------------------------------------------------------------------===//
1646 
1647  typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
1648 
1649  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1650  Address arrayEndPointer,
1651  QualType elementType,
1652  CharUnits elementAlignment,
1653  Destroyer *destroyer);
1654  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1655  llvm::Value *arrayEnd,
1656  QualType elementType,
1657  CharUnits elementAlignment,
1658  Destroyer *destroyer);
1659 
1660  void pushDestroy(QualType::DestructionKind dtorKind,
1661  Address addr, QualType type);
1662  void pushEHDestroy(QualType::DestructionKind dtorKind,
1663  Address addr, QualType type);
1664  void pushDestroy(CleanupKind kind, Address addr, QualType type,
1665  Destroyer *destroyer, bool useEHCleanupForArray);
1666  void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
1667  QualType type, Destroyer *destroyer,
1668  bool useEHCleanupForArray);
1669  void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1670  llvm::Value *CompletePtr,
1671  QualType ElementType);
1672  void pushStackRestore(CleanupKind kind, Address SPMem);
1673  void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
1674  bool useEHCleanupForArray);
1675  llvm::Function *generateDestroyHelper(Address addr, QualType type,
1676  Destroyer *destroyer,
1677  bool useEHCleanupForArray,
1678  const VarDecl *VD);
1679  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1680  QualType elementType, CharUnits elementAlign,
1681  Destroyer *destroyer,
1682  bool checkZeroLength, bool useEHCleanup);
1683 
1684  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1685 
1686  /// Determines whether an EH cleanup is required to destroy a type
1687  /// with the given destruction kind.
1688  bool needsEHCleanup(QualType::DestructionKind kind) {
1689  switch (kind) {
1690  case QualType::DK_none:
1691  return false;
1692  case QualType::DK_cxx_destructor:
1693  case QualType::DK_objc_weak_lifetime:
1694  case QualType::DK_nontrivial_c_struct:
1695  return getLangOpts().Exceptions;
1696  case QualType::DK_objc_strong_lifetime:
1697  return getLangOpts().Exceptions &&
1698  CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1699  }
1700  llvm_unreachable("bad destruction kind");
1701  }
1702 
1703  CleanupKind getCleanupKind(QualType::DestructionKind kind) {
1704  return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1705  }
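 // Illustrative usage sketch (hypothetical caller): pushing a destruction
 // cleanup for a local at 'addr' with AST type 'type'; 'CGF', 'addr' and
 // 'type' are assumed to be in scope and are not part of this header.
 //
 //   if (QualType::DestructionKind dtorKind = type.isDestructedType())
 //     CGF.pushDestroy(dtorKind, addr, type);
 //
 // or, selecting the cleanup kind and destroyer explicitly:
 //
 //   CGF.pushDestroy(CGF.getCleanupKind(dtorKind), addr, type,
 //                   CGF.getDestroyer(dtorKind),
 //                   /*useEHCleanupForArray=*/true);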
1706 
1707  //===--------------------------------------------------------------------===//
1708  // Objective-C
1709  //===--------------------------------------------------------------------===//
1710 
1711  void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1712 
1713  void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
1714 
1715  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1716  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1717  const ObjCPropertyImplDecl *PID);
1718  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1719  const ObjCPropertyImplDecl *propImpl,
1720  const ObjCMethodDecl *GetterMethodDecl,
1721  llvm::Constant *AtomicHelperFn);
1722 
1723  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1724  ObjCMethodDecl *MD, bool ctor);
1725 
1726  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1727  /// for the given property.
1728  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1729  const ObjCPropertyImplDecl *PID);
1730  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1731  const ObjCPropertyImplDecl *propImpl,
1732  llvm::Constant *AtomicHelperFn);
1733 
1734  //===--------------------------------------------------------------------===//
1735  // Block Bits
1736  //===--------------------------------------------------------------------===//
1737 
1738  /// Emit block literal.
1739  /// \return an LLVM value which is a pointer to a struct which contains
1740  /// information about the block, including the block invoke function, the
1741  /// captured variables, etc.
1742  llvm::Value *EmitBlockLiteral(const BlockExpr *);
1743  static void destroyBlockInfos(CGBlockInfo *info);
1744 
1745  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1746  const CGBlockInfo &Info,
1747  const DeclMapTy &ldm,
1748  bool IsLambdaConversionToBlock,
1749  bool BuildGlobalBlock);
1750 
1751  /// Check if \p T is a C++ class that has a destructor that can throw.
1752  static bool cxxDestructorCanThrow(QualType T);
1753 
1754  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1755  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1756  llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1757  const ObjCPropertyImplDecl *PID);
1758  llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1759  const ObjCPropertyImplDecl *PID);
1760  llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1761 
1762  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
1763  bool CanThrow);
1764 
1765  class AutoVarEmission;
1766 
1767  void emitByrefStructureInit(const AutoVarEmission &emission);
1768 
1769  /// Enter a cleanup to destroy a __block variable. Note that this
1770  /// cleanup should be a no-op if the variable hasn't left the stack
1771  /// yet; if a cleanup is required for the variable itself, that needs
1772  /// to be done externally.
1773  ///
1774  /// \param Kind Cleanup kind.
1775  ///
1776  /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
1777  /// structure that will be passed to _Block_object_dispose. When
1778  /// \p LoadBlockVarAddr is true, the address of the field of the block
1779  /// structure that holds the address of the __block structure.
1780  ///
1781  /// \param Flags The flag that will be passed to _Block_object_dispose.
1782  ///
1783  /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
1784  /// \p Addr to get the address of the __block structure.
1785  void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
1786  bool LoadBlockVarAddr, bool CanThrow);
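 // Illustrative usage sketch (hypothetical caller): registering the dispose
 // cleanup for a __block variable whose byref structure lives at 'byrefAddr';
 // 'CGF', 'byrefAddr' and the flag choice are assumptions, not part of this
 // header.
 //
 //   CGF.enterByrefCleanup(NormalAndEHCleanup, byrefAddr,
 //                         BLOCK_FIELD_IS_BYREF,
 //                         /*LoadBlockVarAddr=*/false, /*CanThrow=*/false);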
1787 
1788  void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
1789  llvm::Value *ptr);
1790 
1791  Address LoadBlockStruct();
1792  Address GetAddrOfBlockDecl(const VarDecl *var);
1793 
1794  /// emitBlockByrefAddress - Computes the location of the
1795  /// data in a variable which is declared as __block.
1796  Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
1797  bool followForward = true);
1798  Address emitBlockByrefAddress(Address baseAddr,
1799  const BlockByrefInfo &info,
1800  bool followForward,
1801  const llvm::Twine &name);
1802 
1803  const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
1804 
1805  QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args);
1806 
1807  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1808  const CGFunctionInfo &FnInfo);
1809 
1810  /// Annotate the function with an attribute that disables TSan checking at
1811  /// runtime.
1812  void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
1813 
1814  /// Emit code for the start of a function.
1815  /// \param Loc The location to be associated with the function.
1816  /// \param StartLoc The location of the function body.
1817  void StartFunction(GlobalDecl GD,
1818  QualType RetTy,
1819  llvm::Function *Fn,
1820  const CGFunctionInfo &FnInfo,
1821  const FunctionArgList &Args,
1822  SourceLocation Loc = SourceLocation(),
1823  SourceLocation StartLoc = SourceLocation());
1824 
1825  static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);
1826 
1827  void EmitConstructorBody(FunctionArgList &Args);
1828  void EmitDestructorBody(FunctionArgList &Args);
1829  void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
1830  void EmitFunctionBody(const Stmt *Body);
1831  void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
1832 
1833  void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
1834  CallArgList &CallArgs);
1835  void EmitLambdaBlockInvokeBody();
1836  void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1837  void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
1838  void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV) {
1839  EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
1840  }
1841  void EmitAsanPrologueOrEpilogue(bool Prologue);
1842 
1843  /// Emit the unified return block, trying to avoid its emission when
1844  /// possible.
1845  /// \return The debug location of the user written return statement if the
1846  /// return block is avoided.
1847  llvm::DebugLoc EmitReturnBlock();
1848 
1849  /// FinishFunction - Complete IR generation of the current function. It is
1850  /// legal to call this function even if there is no current insertion point.
1851  void FinishFunction(SourceLocation EndLoc=SourceLocation());
1852 
1853  void StartThunk(llvm::Function *Fn, GlobalDecl GD,
1854  const CGFunctionInfo &FnInfo, bool IsUnprototyped);
1855 
1856  void EmitCallAndReturnForThunk(llvm::Constant *Callee, const ThunkInfo *Thunk,
1857  bool IsUnprototyped);
1858 
1859  void FinishThunk();
1860 
1861  /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
1862  void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
1863  llvm::Value *Callee);
1864 
1865  /// Generate a thunk for the given method.
1866  void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1867  GlobalDecl GD, const ThunkInfo &Thunk,
1868  bool IsUnprototyped);
1869 
1870  llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
1871  const CGFunctionInfo &FnInfo,
1872  GlobalDecl GD, const ThunkInfo &Thunk);
1873 
1874  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1875  FunctionArgList &Args);
1876 
1877  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
1878 
1879  /// Struct with all information about dynamic [sub]class needed to set vptr.
1880  struct VPtr {
1881  BaseSubobject Base;
1882  const CXXRecordDecl *NearestVBase;
1883  CharUnits OffsetFromNearestVBase;
1884  const CXXRecordDecl *VTableClass;
1885  };
1886 
1887  /// Initialize the vtable pointer of the given subobject.
1888  void InitializeVTablePointer(const VPtr &vptr);
1889 
1889 
1890  typedef llvm::SmallVector<VPtr, 4> VPtrsVector;
1891 
1892  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1893  VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);
1894 
1895  void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
1896  CharUnits OffsetFromNearestVBase,
1897  bool BaseIsNonVirtualPrimaryBase,
1898  const CXXRecordDecl *VTableClass,
1899  VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
1900 
1901  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1902 
1903  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1904  /// to by This.
1905  llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy,
1906  const CXXRecordDecl *VTableClass);
1907 
1908  enum CFITypeCheckKind {
1909  CFITCK_VCall,
1910  CFITCK_NVCall,
1911  CFITCK_DerivedCast,
1912  CFITCK_UnrelatedCast,
1913  CFITCK_ICall,
1914  CFITCK_NVMFCall,
1915  CFITCK_VMFCall,
1916  };
1917 
1918  /// Derived is the presumed address of an object of type T after a
1919  /// cast. If T is a polymorphic class type, emit a check that the virtual
1920  /// table for Derived belongs to a class derived from T.
1921  void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
1922  bool MayBeNull, CFITypeCheckKind TCK,
1923  SourceLocation Loc);
1924 
1925  /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
1926  /// If vptr CFI is enabled, emit a check that VTable is valid.
1927  void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
1928  CFITypeCheckKind TCK, SourceLocation Loc);
1929 
1930  /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
1931  /// RD using llvm.type.test.
1932  void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
1933  CFITypeCheckKind TCK, SourceLocation Loc);
1934 
1935  /// If whole-program virtual table optimization is enabled, emit an assumption
1936  /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
1937  /// enabled, emit a check that VTable is a member of RD's type identifier.
1938  void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
1939  llvm::Value *VTable, SourceLocation Loc);
1940 
1941  /// Returns whether we should perform a type checked load when loading a
1942  /// virtual function for virtual calls to members of RD. This is generally
1943  /// true when both vcall CFI and whole-program-vtables are enabled.
1944  bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
1945 
1946  /// Emit a type checked load from the given vtable.
1947  llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable,
1948  uint64_t VTableByteOffset);
1949 
1950  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1951  /// given phase of destruction for a destructor. The end result
1952  /// should call destructors on members and base classes in reverse
1953  /// order of their construction.
1954  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1955 
1956  /// ShouldInstrumentFunction - Return true if the current function should be
1957  /// instrumented with __cyg_profile_func_* calls
1958  bool ShouldInstrumentFunction();
1959 
1960  /// ShouldXRayInstrumentFunction - Return true if the current function should be
1961  /// instrumented with XRay nop sleds.
1962  bool ShouldXRayInstrumentFunction() const;
1963 
1964  /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
1965  /// XRay custom event handling calls.
1966  bool AlwaysEmitXRayCustomEvents() const;
1967 
1968  /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
1969  /// XRay typed event handling calls.
1970  bool AlwaysEmitXRayTypedEvents() const;
1971 
1972  /// Encode an address into a form suitable for use in a function prologue.
1973  llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F,
1974  llvm::Constant *Addr);
1975 
1976  /// Decode an address used in a function prologue, encoded by \c
1977  /// EncodeAddrForUseInPrologue.
1978  llvm::Value *DecodeAddrUsedInPrologue(llvm::Value *F,
1979  llvm::Value *EncodedAddr);
1980 
1981  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1982  /// arguments for the given function. This is also responsible for naming the
1983  /// LLVM function arguments.
1984  void EmitFunctionProlog(const CGFunctionInfo &FI,
1985  llvm::Function *Fn,
1986  const FunctionArgList &Args);
1987 
1988  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1989  /// given temporary.
1990  void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
1991  SourceLocation EndLoc);
1992 
1993  /// Emit a test that checks if the return value \p RV is nonnull.
1994  void EmitReturnValueCheck(llvm::Value *RV);
1995 
1996  /// EmitStartEHSpec - Emit the start of the exception spec.
1997  void EmitStartEHSpec(const Decl *D);
1998 
1999  /// EmitEndEHSpec - Emit the end of the exception spec.
2000  void EmitEndEHSpec(const Decl *D);
2001 
2002  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2003  llvm::BasicBlock *getTerminateLandingPad();
2004 
2005  /// getTerminateFunclet - Return a cleanup funclet that just calls
2006  /// terminate.
2007  llvm::BasicBlock *getTerminateFunclet();
2008 
2009  /// getTerminateHandler - Return a handler (not a landing pad, just
2010  /// a catch handler) that just calls terminate. This is used when
2011  /// a terminate scope encloses a try.
2012  llvm::BasicBlock *getTerminateHandler();
2013 
2014  llvm::Type *ConvertTypeForMem(QualType T);
2015  llvm::Type *ConvertType(QualType T);
2016  llvm::Type *ConvertType(const TypeDecl *T) {
2017  return ConvertType(getContext().getTypeDeclType(T));
2018  }
2019 
2020  /// LoadObjCSelf - Load the value of self. This function is only valid while
2021  /// generating code for an Objective-C method.
2022  llvm::Value *LoadObjCSelf();
2023 
2024  /// TypeOfSelfObject - Return type of object that this self represents.
2025  QualType TypeOfSelfObject();
2026 
2027  /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2028  static TypeEvaluationKind getEvaluationKind(QualType T);
2029 
2030  static bool hasScalarEvaluationKind(QualType T) {
2031  return getEvaluationKind(T) == TEK_Scalar;
2032  }
2033 
2034  static bool hasAggregateEvaluationKind(QualType T) {
2035  return getEvaluationKind(T) == TEK_Aggregate;
2036  }
2037 
2038  /// createBasicBlock - Create an LLVM basic block.
2039  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
2040  llvm::Function *parent = nullptr,
2041  llvm::BasicBlock *before = nullptr) {
2042  return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
2043  }
2044 
2045  /// getJumpDestForLabel - Return the JumpDest for the LLVM basic block that
2046  /// the specified label maps to.
2047  JumpDest getJumpDestForLabel(const LabelDecl *S);
2048 
2049  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2050  /// another basic block, simplify it. This assumes that no other code could
2051  /// potentially reference the basic block.
2052  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2053 
2054  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2055  /// adding a fall-through branch from the current insert block if
2056  /// necessary. It is legal to call this function even if there is no current
2057  /// insertion point.
2058  ///
2059  /// IsFinished - If true, indicates that the caller has finished emitting
2060  /// branches to the given block and does not expect to emit code into it. This
2061  /// means the block can be ignored if it is unreachable.
2062  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
2063 
2064  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2065  /// near its uses, and leave the insertion point in it.
2066  void EmitBlockAfterUses(llvm::BasicBlock *BB);
2067 
2068  /// EmitBranch - Emit a branch to the specified basic block from the current
2069  /// insert block, taking care to avoid creation of branches from dummy
2070  /// blocks. It is legal to call this function even if there is no current
2071  /// insertion point.
2072  ///
2073  /// This function clears the current insertion point. The caller should follow
2074  /// calls to this function with calls to Emit*Block prior to generating new
2075  /// code.
2076  void EmitBranch(llvm::BasicBlock *Block);
2077 
2078  /// HaveInsertPoint - True if an insertion point is defined. If not, this
2079  /// indicates that the current code being emitted is unreachable.
2080  bool HaveInsertPoint() const {
2081  return Builder.GetInsertBlock() != nullptr;
2082  }
2083 
2084  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2085  /// emitted IR has a place to go. Note that by definition, if this function
2086  /// creates a block then that block is unreachable; callers may do better to
2087  /// detect when no insertion point is defined and simply skip IR generation.
2088  void EnsureInsertPoint() {
2089  if (!HaveInsertPoint())
2090  EmitBlock(createBasicBlock());
2091  }
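 // Illustrative usage sketch (hypothetical caller): the usual create/branch/
 // emit pattern for new control flow; 'CGF' and the i1 value 'cond' are
 // assumed to be in scope and are not part of this header.
 //
 //   llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
 //   llvm::BasicBlock *ContBB = CGF.createBasicBlock("cont");
 //   CGF.Builder.CreateCondBr(cond, ThenBB, ContBB);
 //   CGF.EmitBlock(ThenBB);
 //   /* ...emit the 'then' code here... */
 //   CGF.EmitBranch(ContBB);
 //   CGF.EmitBlock(ContBB, /*IsFinished=*/true);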
2092 
2093  /// ErrorUnsupported - Print out an error that codegen doesn't support the
2094  /// specified stmt yet.
2095  void ErrorUnsupported(const Stmt *S, const char *Type);
2096 
2097  //===--------------------------------------------------------------------===//
2098  // Helpers
2099  //===--------------------------------------------------------------------===//
2100 
2101  LValue MakeAddrLValue(Address Addr, QualType T,
2102  AlignmentSource Source = AlignmentSource::Type) {
2103  return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2104  CGM.getTBAAAccessInfo(T));
2105  }
2106 
2107  LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo,
2108  TBAAAccessInfo TBAAInfo) {
2109  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2110  }
2111 
2112  LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2113  AlignmentSource Source = AlignmentSource::Type) {
2114  return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
2115  LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
2116  }
2117 
2118  LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2119  LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
2120  return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
2121  BaseInfo, TBAAInfo);
2122  }
2123 
2124  LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
2125  LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
2126  CharUnits getNaturalTypeAlignment(QualType T,
2127  LValueBaseInfo *BaseInfo = nullptr,
2128  TBAAAccessInfo *TBAAInfo = nullptr,
2129  bool forPointeeType = false);
2130  CharUnits getNaturalPointeeTypeAlignment(QualType T,
2131  LValueBaseInfo *BaseInfo = nullptr,
2132  TBAAAccessInfo *TBAAInfo = nullptr);
2133 
2134  Address EmitLoadOfReference(LValue RefLVal,
2135  LValueBaseInfo *PointeeBaseInfo = nullptr,
2136  TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2137  LValue EmitLoadOfReferenceLValue(LValue RefLVal);
2138  LValue EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy,
2139  AlignmentSource Source =
2140  AlignmentSource::Type) {
2141  LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2142  CGM.getTBAAAccessInfo(RefTy));
2143  return EmitLoadOfReferenceLValue(RefLVal);
2144  }
2145 
2146  Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
2147  LValueBaseInfo *BaseInfo = nullptr,
2148  TBAAAccessInfo *TBAAInfo = nullptr);
2149  LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
2150 
2151  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2152  /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2153  /// insertion point of the builder. The caller is responsible for setting an
2154  /// appropriate alignment on
2155  /// the alloca.
2156  ///
2157  /// \p ArraySize is the number of array elements to be allocated if it
2158  /// is not nullptr.
2159  ///
2160  /// LangAS::Default is the address space of pointers to local variables and
2161  /// temporaries, as exposed in the source language. In certain
2162  /// configurations, this is not the same as the alloca address space, and a
2163  /// cast is needed to lift the pointer from the alloca AS into
2164  /// LangAS::Default. This can happen when the target uses a restricted
2165  /// address space for the stack but the source language requires
2166  /// LangAS::Default to be a generic address space. The latter condition is
2167  /// common for most programming languages; OpenCL is an exception in that
2168  /// LangAS::Default is the private address space, which naturally maps
2169  /// to the stack.
2170  ///
2171  /// Because the address of a temporary is often exposed to the program in
2172  /// various ways, this function will perform the cast. The original alloca
2173  /// instruction is returned through \p Alloca if it is not nullptr.
2174  ///
2175  /// The cast is not performed in CreateTempAllocaWithoutCast. This is
2176  /// more efficient if the caller knows that the address will not be exposed.
2177  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2178  llvm::Value *ArraySize = nullptr);
2179  Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
2180  const Twine &Name = "tmp",
2181  llvm::Value *ArraySize = nullptr,
2182  Address *Alloca = nullptr);
2183  Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
2184  const Twine &Name = "tmp",
2185  llvm::Value *ArraySize = nullptr);
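 // Illustrative usage sketch (hypothetical caller): materializing a temporary
 // for an AST type 'Ty' while keeping a handle on the raw alloca; 'CGF' and
 // 'Ty' are assumed to be in scope and are not part of this header.
 //
 //   Address AllocaAddr = Address::invalid();
 //   Address Tmp = CGF.CreateTempAlloca(
 //       CGF.ConvertTypeForMem(Ty),
 //       CGF.getContext().getTypeAlignInChars(Ty), "tmp",
 //       /*ArraySize=*/nullptr, &AllocaAddr);
 //   // 'Tmp' is usable as a LangAS::Default pointer; 'AllocaAddr' is the
 //   // original alloca in the target's alloca address space.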
2186 
2187  /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2188  /// default ABI alignment of the given LLVM type.
2189  ///
2190  /// IMPORTANT NOTE: This is *not* generally the right alignment for
2191  /// any given AST type that happens to have been lowered to the
2192  /// given IR type. This should only ever be used for function-local,
2193  /// IR-driven manipulations like saving and restoring a value. Do
2194  /// not hand this address off to arbitrary IRGen routines, and especially
2195  /// do not pass it as an argument to a function that might expect a
2196  /// properly ABI-aligned value.
2197  Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
2198  const Twine &Name = "tmp");
2199 
2200  /// InitTempAlloca - Provide an initial value for the given alloca which
2201  /// will be observable at all locations in the function.
2202  ///
2203  /// The address should be something that was returned from one of
2204  /// the CreateTempAlloca or CreateMemTemp routines, and the
2205  /// initializer must be valid in the entry block (i.e. it must
2206  /// either be a constant or an argument value).
2207  void InitTempAlloca(Address Alloca, llvm::Value *Value);
2208 
2209  /// CreateIRTemp - Create a temporary IR object of the given type, with
2210  /// appropriate alignment. This routine should only be used when a temporary
2211  /// value needs to be stored into an alloca (for example, to avoid explicit
2212  /// PHI construction), but the type is the IR type, not the type appropriate
2213  /// for storing in memory.
2214  ///
2215  /// That is, this is exactly equivalent to CreateMemTemp, but calling
2216  /// ConvertType instead of ConvertTypeForMem.
2217  Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
2218 
2219  /// CreateMemTemp - Create a temporary memory object of the given type, with
2220  /// appropriate alignment and cast it to the default address space. Returns
2221  /// the original alloca instruction by \p Alloca if it is not nullptr.
2222  Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
2223  Address *Alloca = nullptr);
2224  Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
2225  Address *Alloca = nullptr);
2226 
2227  /// CreateMemTemp - Create a temporary memory object of the given type, with
2228  /// appropriate alignment without casting it to the default address space.
2229  Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2230  Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
2231  const Twine &Name = "tmp");
2232 
2233  /// CreateAggTemp - Create a temporary memory object for the given
2234  /// aggregate type.
2235  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
2236  return AggValueSlot::forAddr(CreateMemTemp(T, Name),
2237  T.getQualifiers(),
2238  AggValueSlot::IsNotDestructed,
2239  AggValueSlot::DoesNotNeedGCBarriers,
2240  AggValueSlot::IsNotAliased,
2241  AggValueSlot::DoesNotOverlap);
2242  }
2243 
2244  /// Emit a cast to void* in the appropriate address space.
2245  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
2246 
2247  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2248  /// expression and compare the result against zero, returning an Int1Ty value.
2249  llvm::Value *EvaluateExprAsBool(const Expr *E);
2250 
2251  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2252  void EmitIgnoredExpr(const Expr *E);
2253 
2254  /// EmitAnyExpr - Emit code to compute the specified expression which can have
2255  /// any type. The result is returned as an RValue struct. If this is an
2256  /// aggregate expression, the aggSlot argument indicates where
2257  /// the result should be returned.
2258  ///
2259  /// \param ignoreResult True if the resulting value isn't used.
2260  RValue EmitAnyExpr(const Expr *E,
2261  AggValueSlot aggSlot = AggValueSlot::ignored(),
2262  bool ignoreResult = false);
2263 
2264  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2265  // or the value of the expression, depending on how va_list is defined.
2266  Address EmitVAListRef(const Expr *E);
2267 
2268  /// Emit a "reference" to a __builtin_ms_va_list; this is
2269  /// always the value of the expression, because a __builtin_ms_va_list is a
2270  /// pointer to a char.
2271  Address EmitMSVAListRef(const Expr *E);
2272 
2273  /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
2274  /// always be accessible even if no aggregate location is provided.
2275  RValue EmitAnyExprToTemp(const Expr *E);
2276 
2277  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2278  /// arbitrary expression into the given memory location.
2279  void EmitAnyExprToMem(const Expr *E, Address Location,
2280  Qualifiers Quals, bool IsInitializer);
2281 
2282  void EmitAnyExprToExn(const Expr *E, Address Addr);
2283 
2284  /// EmitExprAsInit - Emits the code necessary to initialize a
2285  /// location in memory with the given initializer.
2286  void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2287  bool capturedByInit);
2288 
2289  /// hasVolatileMember - returns true if aggregate type has a volatile
2290  /// member.
2291  bool hasVolatileMember(QualType T) {
2292  if (const RecordType *RT = T->getAs<RecordType>()) {
2293  const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
2294  return RD->hasVolatileMember();
2295  }
2296  return false;
2297  }
2298 
2299  /// Determine whether a return value slot may overlap some other object.
2300  AggValueSlot::Overlap_t overlapForReturnValue() {
2301  // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2302  // class subobjects. These cases may need to be revisited depending on the
2303  // resolution of the relevant core issue.
2304  return AggValueSlot::DoesNotOverlap;
2305  }
2306 
2307  /// Determine whether a field initialization may overlap some other object.
2308  AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD) {
2309  // FIXME: These cases can result in overlap as a result of P0840R0's
2310  // [[no_unique_address]] attribute. We can still infer NoOverlap in the
2311  // presence of that attribute if the field is within the nvsize of its
2312  // containing class, because non-virtual subobjects are initialized in
2313  // address order.
2314  return AggValueSlot::DoesNotOverlap;
2315  }
2316 
2317  /// Determine whether a base class initialization may overlap some other
2318  /// object.
2319  AggValueSlot::Overlap_t overlapForBaseInit(const CXXRecordDecl *RD,
2320  const CXXRecordDecl *BaseRD,
2321  bool IsVirtual);
2322 
2323  /// Emit an aggregate assignment.
2324  void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
2325  bool IsVolatile = hasVolatileMember(EltTy);
2326  EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
2327  }
2328 
2329  void EmitAggregateCopyCtor(LValue Dest, LValue Src,
2330  AggValueSlot::Overlap_t MayOverlap) {
2331  EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
2332  }
2333 
2334  /// EmitAggregateCopy - Emit an aggregate copy.
2335  ///
2336  /// \param isVolatile \c true iff either the source or the destination is
2337  /// volatile.
2338  /// \param MayOverlap Whether the tail padding of the destination might be
2339  /// occupied by some other object. More efficient code can often be
2340  /// generated if not.
2341  void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
2342  AggValueSlot::Overlap_t MayOverlap,
2343  bool isVolatile = false);
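 // Illustrative usage sketch (hypothetical caller): copying an aggregate into
 // a freshly created object whose tail padding is known to be unoccupied;
 // 'CGF', 'DestLV', 'SrcLV' and 'Ty' are assumptions, not part of this header.
 //
 //   CGF.EmitAggregateCopy(DestLV, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
 //                         /*isVolatile=*/false);
 //
 // For plain assignment, EmitAggregateAssign above conservatively passes
 // AggValueSlot::MayOverlap instead.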
2344 
2345  /// GetAddrOfLocalVar - Return the address of a local variable.
2346  Address GetAddrOfLocalVar(const VarDecl *VD) {
2347  auto it = LocalDeclMap.find(VD);
2348  assert(it != LocalDeclMap.end() &&
2349  "Invalid argument to GetAddrOfLocalVar(), no decl!");
2350  return it->second;
2351  }
2352 
2353  /// Given an opaque value expression, return its LValue mapping if it exists,
2354  /// otherwise create one.
2355  LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
2356 
2357  /// Given an opaque value expression, return its RValue mapping if it exists,
2358  /// otherwise create one.
2359  RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
2360 
2361  /// Get the index of the current ArrayInitLoopExpr, if any.
2362  llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
2363 
2364  /// getAccessedFieldNo - Given an encoded value and a result number, return
2365  /// the input field number being accessed.
2366  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
2367 
2368  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
2369  llvm::BasicBlock *GetIndirectGotoBlock();
2370 
2371  /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
2372  static bool IsWrappedCXXThis(const Expr *E);
2373 
2374  /// EmitNullInitialization - Generate code to set a value of the given type to
2375  /// null. If the type contains data member pointers, they will be initialized
2376  /// to -1 in accordance with the Itanium C++ ABI.
2377  void EmitNullInitialization(Address DestPtr, QualType Ty);
2378 
2379  /// Emits a call to an LLVM variable-argument intrinsic, either
2380  /// \c llvm.va_start or \c llvm.va_end.
2381  /// \param ArgValue A reference to the \c va_list as emitted by either
2382  /// \c EmitVAListRef or \c EmitMSVAListRef.
2383  /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
2384  /// calls \c llvm.va_end.
2385  llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
2386 
2387  /// Generate code to get an argument from the passed in pointer
2388  /// and update it accordingly.
2389  /// \param VE The \c VAArgExpr for which to generate code.
2390  /// \param VAListAddr Receives a reference to the \c va_list as emitted by
2391  /// either \c EmitVAListRef or \c EmitMSVAListRef.
2392  /// \returns A pointer to the argument.
2393  // FIXME: We should be able to get rid of this method and use the va_arg
2394  // instruction in LLVM instead once it works well enough.
2395  Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr);
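 // Illustrative usage sketch (hypothetical caller): lowering a va_arg
 // expression 'VE'; 'CGF' and 'VE' are assumed to be in scope and are not
 // part of this header.
 //
 //   Address VAListAddr = Address::invalid();
 //   Address ArgAddr = CGF.EmitVAArg(VE, VAListAddr);
 //   // 'VAListAddr' receives a reference to the va_list (see \param above);
 //   // 'ArgAddr' points at the fetched argument.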
2396 
2397  /// emitArrayLength - Compute the length of an array, even if it's a
2398  /// VLA, and drill down to the base element type.
2399  llvm::Value *emitArrayLength(const ArrayType *arrayType,
2400  QualType &baseType,
2401  Address &addr);
2402 
2403  /// EmitVLASize - Capture all the sizes for the VLA expressions in
2404  /// the given variably-modified type and store them in the VLASizeMap.
2405  ///
2406  /// This function can be called with a null (unreachable) insert point.
2407  void EmitVariablyModifiedType(QualType Ty);
2408 
2409  struct VlaSizePair {
2410  llvm::Value *NumElts;
2411  QualType Type;
2412 
2413  VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
2414  };
2415 
2416  /// Return the number of elements for a single dimension
2417  /// for the given array type.
2418  VlaSizePair getVLAElements1D(const VariableArrayType *vla);
2419  VlaSizePair getVLAElements1D(QualType vla);
2420 
2421  /// Returns an LLVM value that corresponds to the size,
2422  /// in non-variably-sized elements, of a variable length array type,
2423  /// plus the largest non-variably-sized element type. Assumes that
2424  /// the type has already been emitted with EmitVariablyModifiedType.
2425  VlaSizePair getVLASize(const VariableArrayType *vla);
2426  VlaSizePair getVLASize(QualType vla);
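 // Illustrative usage sketch (hypothetical caller): querying the runtime size
 // of a variable length array type 'VlaTy'; 'CGF' and 'VlaTy' are assumptions,
 // not part of this header.
 //
 //   CGF.EmitVariablyModifiedType(VlaTy);      // capture the VLA size exprs
 //   auto VlaSize = CGF.getVLASize(VlaTy);
 //   llvm::Value *NumElts = VlaSize.NumElts;   // runtime element count
 //   QualType EltTy = VlaSize.Type;            // largest fixed-size element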
2427 
2428  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
2429  /// generating code for a C++ member function.
2430  llvm::Value *LoadCXXThis() {
2431  assert(CXXThisValue && "no 'this' value for this function");
2432  return CXXThisValue;
2433  }
2434  Address LoadCXXThisAddress();
2435 
2436  /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors
2437  /// of classes with virtual bases.
2438  // FIXME: Every place that calls LoadCXXVTT is something
2439  // that needs to be abstracted properly.
2440  llvm::Value *LoadCXXVTT() {
2441  assert(CXXStructorImplicitParamValue && "no VTT value for this function");
2442  return CXXStructorImplicitParamValue;
2443  }
2444 
2445  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
2446  /// complete class to the given direct base.
2447  Address
2448  GetAddressOfDirectBaseInCompleteClass(Address Value,
2449  const CXXRecordDecl *Derived,
2450  const CXXRecordDecl *Base,
2451  bool BaseIsVirtual);
2452 
2453  static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
2454 
2455  /// GetAddressOfBaseClass - This function will add the necessary delta to the
2456  /// load of 'this' and returns address of the base class.
2457  Address GetAddressOfBaseClass(Address Value,
2458  const CXXRecordDecl *Derived,
2459  CastExpr::path_const_iterator PathBegin,
2460  CastExpr::path_const_iterator PathEnd,
2461  bool NullCheckValue, SourceLocation Loc);
2462 
2463  Address GetAddressOfDerivedClass(Address Value,
2464  const CXXRecordDecl *Derived,
2465  CastExpr::path_const_iterator PathBegin,
2466  CastExpr::path_const_iterator PathEnd,
2467  bool NullCheckValue);
2468 
2469  /// GetVTTParameter - Return the VTT parameter that should be passed to a
2470  /// base constructor/destructor with virtual bases.
2471  /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
2472  /// to ItaniumCXXABI.cpp together with all the references to VTT.
2473  llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
2474  bool Delegating);
2475 
2476  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
2477  CXXCtorType CtorType,
2478  const FunctionArgList &Args,
2479  SourceLocation Loc);
2480  // It's important not to confuse this and the previous function. Delegating
2481  // constructors are a C++11 feature. The constructor delegate optimization
2482  // is used to reduce duplication in the base and complete constructors where
2483  // they are substantially the same.
2484  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
2485  const FunctionArgList &Args);
2486 
2487  /// Emit a call to an inheriting constructor (that is, one that invokes a
2488  /// constructor inherited from a base class) by inlining its definition. This
2489  /// is necessary if the ABI does not support forwarding the arguments to the
2490  /// base class constructor (because they're variadic or similar).
2491  void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor,
2492  CXXCtorType CtorType,
2493  bool ForVirtualBase,
2494  bool Delegating,
2495  CallArgList &Args);
2496 
2497  /// Emit a call to a constructor inherited from a base class, passing the
2498  /// current constructor's arguments along unmodified (without even making
2499  /// a copy).
2500  void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D,
2501  bool ForVirtualBase, Address This,
2502  bool InheritedFromVBase,
2503  const CXXInheritedCtorInitExpr *E);
2504 
2505  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
2506  bool ForVirtualBase, bool Delegating,
2507  Address This, const CXXConstructExpr *E,
2508  AggValueSlot::Overlap_t Overlap,
2509  bool NewPointerIsChecked);
2510 
2511  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
2512  bool ForVirtualBase, bool Delegating,
2513  Address This, CallArgList &Args,
2514  AggValueSlot::Overlap_t Overlap,
2515  SourceLocation Loc,
2516  bool NewPointerIsChecked);
2517 
2518  /// Emit assumption loads for all bases. Must be called only on the
2519  /// most-derived class and not while the object is under construction.
2520  void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
2521 
2522  /// Emit assumption that vptr load == global vtable.
2523  void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
2524 
2525  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
2526  Address This, Address Src,
2527  const CXXConstructExpr *E);
2528 
2529  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
2530  const ArrayType *ArrayTy,
2531  Address ArrayPtr,
2532  const CXXConstructExpr *E,
2533  bool NewPointerIsChecked,
2534  bool ZeroInitialization = false);
2535 
2536  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
2537  llvm::Value *NumElements,
2538  Address ArrayPtr,
2539  const CXXConstructExpr *E,
2540  bool NewPointerIsChecked,
2541  bool ZeroInitialization = false);
2542 
2543  static Destroyer destroyCXXObject;
2544 
2545  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
2546  bool ForVirtualBase, bool Delegating,
2547  Address This);
2548 
2549  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
2550  llvm::Type *ElementTy, Address NewPtr,
2551  llvm::Value *NumElements,
2552  llvm::Value *AllocSizeWithoutCookie);
2553 
2554  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
2555  Address Ptr);
2556 
2557  llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
2558  void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
2559 
2560  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
2561  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
2562 
2563  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
2564  QualType DeleteTy, llvm::Value *NumElements = nullptr,
2565  CharUnits CookieSize = CharUnits());
2566 
2567  RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
2568  const CallExpr *TheCallExpr, bool IsDelete);
2569 
2570  llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
2571  llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
2572  Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
2573 
2574  /// Situations in which we might emit a check for the suitability of a
2575  /// pointer or glvalue.
2576  enum TypeCheckKind {
2577  /// Checking the operand of a load. Must be suitably sized and aligned.
2578  TCK_Load,
2579  /// Checking the destination of a store. Must be suitably sized and aligned.
2580  TCK_Store,
2581  /// Checking the bound value in a reference binding. Must be suitably sized
2582  /// and aligned, but is not required to refer to an object (until the
2583  /// reference is used), per core issue 453.
2584  TCK_ReferenceBinding,
2585  /// Checking the object expression in a non-static data member access. Must
2586  /// be an object within its lifetime.
2587  TCK_MemberAccess,
2588  /// Checking the 'this' pointer for a call to a non-static member function.
2589  /// Must be an object within its lifetime.
2590  TCK_MemberCall,
2591  /// Checking the 'this' pointer for a constructor call.
2592  TCK_ConstructorCall,
2593  /// Checking the operand of a static_cast to a derived pointer type. Must be
2594  /// null or an object within its lifetime.
2595  TCK_DowncastPointer,
2596  /// Checking the operand of a static_cast to a derived reference type. Must
2597  /// be an object within its lifetime.
2598  TCK_DowncastReference,
2599  /// Checking the operand of a cast to a base object. Must be suitably sized
2600  /// and aligned.
2601  TCK_Upcast,
2602  /// Checking the operand of a cast to a virtual base object. Must be an
2603  /// object within its lifetime.
2604  TCK_UpcastToVirtualBase,
2605  /// Checking the value assigned to a _Nonnull pointer. Must not be null.
2606  TCK_NonnullAssign,
2607  /// Checking the operand of a dynamic_cast or a typeid expression. Must be
2608  /// null or an object within its lifetime.
2609  TCK_DynamicOperation
2610  };
2611 
2612  /// Determine whether the pointer type check \p TCK permits null pointers.
2613  static bool isNullPointerAllowed(TypeCheckKind TCK);
2614 
2615  /// Determine whether the pointer type check \p TCK requires a vptr check.
2616  static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);
2617 
2618  /// Whether any type-checking sanitizers are enabled. If \c false,
2619  /// calls to EmitTypeCheck can be skipped.
2620  bool sanitizePerformTypeCheck() const;
2621 
2622  /// Emit a check that \p V is the address of storage of the
2623  /// appropriate size and alignment for an object of type \p Type.
2624  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
2625  QualType Type, CharUnits Alignment = CharUnits::Zero(),
2626  SanitizerSet SkippedChecks = SanitizerSet());
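 // Illustrative usage sketch (hypothetical caller): checking the operand of a
 // load; 'CGF', 'Loc', 'Ptr', 'Ty' and 'Align' are assumed to be in scope and
 // are not part of this header.
 //
 //   if (CGF.sanitizePerformTypeCheck())
 //     CGF.EmitTypeCheck(CodeGenFunction::TCK_Load, Loc, Ptr, Ty, Align);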
2627 
2628  /// Emit a check that \p Base points into an array object, which
2629  /// we can access at index \p Index. \p Accessed should be \c false if we
2630  /// we can access at index \p Index. \p Accessed should be \c false if
2631  void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
2632  QualType IndexType, bool Accessed);
2633 
2634  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2635  bool isInc, bool isPre);
2636  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
2637  bool isInc, bool isPre);
2638 
2639  /// Converts Location to a DebugLoc, if debug information is enabled.
2640  llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
2641 
2642 
2643  //===--------------------------------------------------------------------===//
2644  // Declaration Emission
2645  //===--------------------------------------------------------------------===//
2646 
2647  /// EmitDecl - Emit a declaration.
2648  ///
2649  /// This function can be called with a null (unreachable) insert point.
2650  void EmitDecl(const Decl &D);
2651 
2652  /// EmitVarDecl - Emit a local variable declaration.
2653  ///
2654  /// This function can be called with a null (unreachable) insert point.
2655  void EmitVarDecl(const VarDecl &D);
2656 
2657  void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2658  bool capturedByInit);
2659 
2660  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
2661  llvm::Value *Address);
2662 
2663  /// Determine whether the given initializer is trivial in the sense
2664  /// that it requires no code to be generated.
2665  bool isTrivialInitializer(const Expr *Init);
2666 
2667  /// EmitAutoVarDecl - Emit an auto variable declaration.
2668  ///
2669  /// This function can be called with a null (unreachable) insert point.
2670  void EmitAutoVarDecl(const VarDecl &D);
2671 
2672  class AutoVarEmission {
2673  friend class CodeGenFunction;
2674 
2675  const VarDecl *Variable;
2676 
2677  /// The address of the alloca for languages with explicit address space
2678  /// (e.g. OpenCL) or alloca casted to generic pointer for address space
2679  /// agnostic languages (e.g. C++). Invalid if the variable was emitted
2680  /// as a global constant.
2681  Address Addr;
2682 
2683  llvm::Value *NRVOFlag;
2684 
2685  /// True if the variable is a __block variable that is captured by an
2686  /// escaping block.
2687  bool IsEscapingByRef;
2688 
2689  /// True if the variable is of aggregate type and has a constant
2690  /// initializer.
2691  bool IsConstantAggregate;
2692 
2693  /// Non-null if we should use lifetime annotations.
2694  llvm::Value *SizeForLifetimeMarkers;
2695 
2696  /// Address with original alloca instruction. Invalid if the variable was
2697  /// emitted as a global constant.
2698  Address AllocaAddr;
2699 
2700  struct Invalid {};
2701  AutoVarEmission(Invalid)
2702  : Variable(nullptr), Addr(Address::invalid()),
2703  AllocaAddr(Address::invalid()) {}
2704 
2705  AutoVarEmission(const VarDecl &variable)
2706  : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
2707  IsEscapingByRef(false), IsConstantAggregate(false),
2708  SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
2709 
2710  bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
2711 
2712  public:
2713  static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
2714 
2715  bool useLifetimeMarkers() const {
2716  return SizeForLifetimeMarkers != nullptr;
2717  }
2718  llvm::Value *getSizeForLifetimeMarkers() const {
2719  assert(useLifetimeMarkers());
2720  return SizeForLifetimeMarkers;
2721  }
2722 
2723  /// Returns the raw, allocated address, which is not necessarily
2724  /// the address of the object itself. It is casted to default
2725  /// address space for address space agnostic languages.
2726  Address getAllocatedAddress() const {
2727  return Addr;
2728  }
2729 
2730  /// Returns the address for the original alloca instruction.
2731  Address getOriginalAllocatedAddress() const { return AllocaAddr; }
2732 
2733  /// Returns the address of the object within this declaration.
2734  /// Note that this does not chase the forwarding pointer for
2735  /// __block decls.
2736  Address getObjectAddress(CodeGenFunction &CGF) const {
2737  if (!IsEscapingByRef) return Addr;
2738 
2739  return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
2740  }
2741  };
2742  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
2743  void EmitAutoVarInit(const AutoVarEmission &emission);
2744  void EmitAutoVarCleanups(const AutoVarEmission &emission);
2745  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
2746  QualType::DestructionKind dtorKind);
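 // Illustrative usage sketch (hypothetical caller): the three-phase protocol
 // for emitting an automatic variable 'D'; 'CGF' and 'D' are assumptions, not
 // part of this header. EmitAutoVarDecl is essentially this sequence.
 //
 //   CodeGenFunction::AutoVarEmission Emission = CGF.EmitAutoVarAlloca(D);
 //   CGF.EmitAutoVarInit(Emission);
 //   CGF.EmitAutoVarCleanups(Emission);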
2747 
2748  /// Emits the alloca and debug information for the size expressions for each
2749  /// dimension of an array. It registers the association of its (1-dimensional)
2750  /// QualTypes and size expression's debug node, so that CGDebugInfo can
2751  /// reference this node when creating the DISubrange object to describe the
2752  /// array types.
2753  void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI,
2754  const VarDecl &D,
2755  bool EmitDebugInfo);
2756 
2757  void EmitStaticVarDecl(const VarDecl &D,
2758  llvm::GlobalValue::LinkageTypes Linkage);
2759 
2760  class ParamValue {
2761  llvm::Value *Value;
2762  unsigned Alignment;
2763  ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
2764  public:
2765  static ParamValue forDirect(llvm::Value *value) {
2766  return ParamValue(value, 0);
2767  }
2768  static ParamValue forIndirect(Address addr) {
2769  assert(!addr.getAlignment().isZero());
2770  return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
2771  }
2772 
2773  bool isIndirect() const { return Alignment != 0; }
2774  llvm::Value *getAnyValue() const { return Value; }
2775 
2776  llvm::Value *getDirectValue() const {
2777  assert(!isIndirect());
2778  return Value;
2779  }
2780 
2781  Address getIndirectAddress() const {
2782  assert(isIndirect());
2783  return Address(Value, CharUnits::fromQuantity(Alignment));
2784  }
2785  };
2786 
2787  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
2788  void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
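 // Illustrative usage sketch (hypothetical caller): binding a parameter for
 // EmitParmDecl; 'CGF', 'D', 'V', 'A' and 'ArgNo' are assumptions, not part
 // of this header.
 //
 //   CGF.EmitParmDecl(D, CodeGenFunction::ParamValue::forDirect(V), ArgNo);
 //   // or, for an indirectly passed argument at address 'A':
 //   CGF.EmitParmDecl(D, CodeGenFunction::ParamValue::forIndirect(A), ArgNo);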
2789 
2790  /// protectFromPeepholes - Protect a value that we're intending to
2791  /// store to the side, but which will probably be used later, from
2792  /// aggressive peepholing optimizations that might delete it.
2793  ///
2794  /// Pass the result to unprotectFromPeepholes to declare that
2795  /// protection is no longer required.
2796  ///
2797  /// There's no particular reason why this shouldn't apply to
2798  /// l-values, it's just that no existing peepholes work on pointers.
2799  PeepholeProtection protectFromPeepholes(RValue rvalue);
2800  void unprotectFromPeepholes(PeepholeProtection protection);
2801 
2802  void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
2803  SourceLocation Loc,
2804  SourceLocation AssumptionLoc,
2805  llvm::Value *Alignment,
2806  llvm::Value *OffsetValue,
2807  llvm::Value *TheCheck,
2808  llvm::Instruction *Assumption);
2809 
2810  void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
2811  SourceLocation Loc, SourceLocation AssumptionLoc,
2812  llvm::Value *Alignment,
2813  llvm::Value *OffsetValue = nullptr);
2814 
2815  void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
2816  SourceLocation Loc, SourceLocation AssumptionLoc,
2817  unsigned Alignment,
2818  llvm::Value *OffsetValue = nullptr);
2819 
2820  void EmitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
2821  SourceLocation AssumptionLoc, unsigned Alignment,
2822  llvm::Value *OffsetValue = nullptr);
2823 
2824  //===--------------------------------------------------------------------===//
2825  // Statement Emission
2826  //===--------------------------------------------------------------------===//
2827 
2828  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
2829  void EmitStopPoint(const Stmt *S);
2830 
2831  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
2832  /// this function even if there is no current insertion point.
2833  ///
2834  /// This function may clear the current insertion point; callers should use
2835  /// EnsureInsertPoint if they wish to subsequently generate code without first
2836  /// calling EmitBlock, EmitBranch, or EmitStmt.
2837  void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = None);
2838 
2839  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
2840  /// necessarily require an insertion point or debug information; typically
2841  /// because the statement amounts to a jump or a container of other
2842  /// statements.
2843  ///
2844  /// \return True if the statement was handled.
2845  bool EmitSimpleStmt(const Stmt *S);
2846 
2847  Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
2848  AggValueSlot AVS = AggValueSlot::ignored());
2849  Address EmitCompoundStmtWithoutScope(const CompoundStmt &S,
2850  bool GetLast = false,
2851  AggValueSlot AVS =
2852  AggValueSlot::ignored());
2853 
2854  /// EmitLabel - Emit the block for the given label. It is legal to call this
2855  /// function even if there is no current insertion point.
2856  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
2857 
2858  void EmitLabelStmt(const LabelStmt &S);
2859  void EmitAttributedStmt(const AttributedStmt &S);
2860  void EmitGotoStmt(const GotoStmt &S);
2861  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
2862  void EmitIfStmt(const IfStmt &S);
2863 
2864  void EmitWhileStmt(const WhileStmt &S,
2865  ArrayRef<const Attr *> Attrs = None);
2866  void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
2867  void EmitForStmt(const ForStmt &S,
2868  ArrayRef<const Attr *> Attrs = None);
2869  void EmitReturnStmt(const ReturnStmt &S);
2870  void EmitDeclStmt(const DeclStmt &S);
2871  void EmitBreakStmt(const BreakStmt &S);
2872  void EmitContinueStmt(const ContinueStmt &S);
2873  void EmitSwitchStmt(const SwitchStmt &S);
2874  void EmitDefaultStmt(const DefaultStmt &S);
2875  void EmitCaseStmt(const CaseStmt &S);
2876  void EmitCaseStmtRange(const CaseStmt &S);
2877  void EmitAsmStmt(const AsmStmt &S);
2878 
2879  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
2880  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
2881  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
2882  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
2883  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
2884 
2885  void EmitCoroutineBody(const CoroutineBodyStmt &S);
2886  void EmitCoreturnStmt(const CoreturnStmt &S);
2887  RValue EmitCoawaitExpr(const CoawaitExpr &E,
2888  AggValueSlot aggSlot = AggValueSlot::ignored(),
2889  bool ignoreResult = false);
2890  LValue EmitCoawaitLValue(const CoawaitExpr *E);
2891  RValue EmitCoyieldExpr(const CoyieldExpr &E,
2892  AggValueSlot aggSlot = AggValueSlot::ignored(),
2893  bool ignoreResult = false);
2894  LValue EmitCoyieldLValue(const CoyieldExpr *E);
2895  RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
2896 
2897  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2898  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2899 
2900  void EmitCXXTryStmt(const CXXTryStmt &S);
2901  void EmitSEHTryStmt(const SEHTryStmt &S);
2902  void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
2903  void EnterSEHTryStmt(const SEHTryStmt &S);
2904  void ExitSEHTryStmt(const SEHTryStmt &S);
2905 
2906  void pushSEHCleanup(CleanupKind kind,
2907  llvm::Function *FinallyFunc);
2908  void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
2909  const Stmt *OutlinedStmt);
2910 
2911  llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
2912  const SEHExceptStmt &Except);
2913 
2914  llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
2915  const SEHFinallyStmt &Finally);
2916 
2917  void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
2918  llvm::Value *ParentFP,
2919  llvm::Value *EntryEBP);
2920  llvm::Value *EmitSEHExceptionCode();
2921  llvm::Value *EmitSEHExceptionInfo();
2922  llvm::Value *EmitSEHAbnormalTermination();
2923 
2924  /// Emit simple code for OpenMP directives in Simd-only mode.
2925  void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
2926 
2927  /// Scan the outlined statement for captures from the parent function. For
2928  /// each capture, mark the capture as escaped and emit a call to
2929  /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
2930  void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
2931  bool IsFilter);
2932 
2933  /// Recovers the address of a local in a parent function. ParentVar is the
2934  /// address of the variable used in the immediate parent function. It can
2935  /// either be an alloca or a call to llvm.localrecover if there are nested
2936  /// outlined functions. ParentFP is the frame pointer of the outermost parent
2937  /// frame.
2938  Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
2939  Address ParentVar,
2940  llvm::Value *ParentFP);
2941 
2942  void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
2943  ArrayRef<const Attr *> Attrs = None);
2944 
2945  /// Controls insertion of cancellation exit blocks in worksharing constructs.
2946  class OMPCancelStackRAII {
2947  CodeGenFunction &CGF;
2948 
2949  public:
2950  OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
2951  bool HasCancel)
2952  : CGF(CGF) {
2953  CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
2954  }
2955  ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
2956  };
2957 
2958  /// Returns the calculated size of the specified type.
2959  llvm::Value *getTypeSize(QualType Ty);
2960  LValue InitCapturedStruct(const CapturedStmt &S);
2961  llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
2962  llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
2963  Address GenerateCapturedStmtArgument(const CapturedStmt &S);
2964  llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S);
2965  void GenerateOpenMPCapturedVars(const CapturedStmt &S,
2966  SmallVectorImpl<llvm::Value *> &CapturedVars);
2967  void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
2968  SourceLocation Loc);
2969  /// Perform element-by-element copying of arrays with type \a
2970  /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
2971  /// generated by \a CopyGen.
2972  ///
2973  /// \param DestAddr Address of the destination array.
2974  /// \param SrcAddr Address of the source array.
2975  /// \param OriginalType Type of destination and source arrays.
2976  /// \param CopyGen Copying procedure that copies value of single array element
2977  /// to another single array element.
2978  void EmitOMPAggregateAssign(
2979  Address DestAddr, Address SrcAddr, QualType OriginalType,
2980  const llvm::function_ref<void(Address, Address)> CopyGen);
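 /// Illustrative usage sketch only: a caller would typically pass a lambda that
 /// emits the copy of one element. DestAddr, SrcAddr and ElemTy below are
 /// placeholder names, not declarations from this header.
 /// \code
 ///   CGF.EmitOMPAggregateAssign(DestAddr, SrcAddr, ElemTy,
 ///                              [&CGF](Address Dst, Address Src) {
 ///                                // emit the copy of a single element from
 ///                                // Src to Dst here
 ///                              });
 /// \endcode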
2981  /// Emit proper copying of data from one variable to another.
2982  ///
2983  /// \param OriginalType Original type of the copied variables.
2984  /// \param DestAddr Destination address.
2985  /// \param SrcAddr Source address.
2986  /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
2987  /// type of the base array element).
2988  /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
2989  /// the base array element).
2990  /// \param Copy Actual copying expression for copying data from \a SrcVD to \a
2991  /// DestVD.
2992  void EmitOMPCopy(QualType OriginalType,
2993  Address DestAddr, Address SrcAddr,
2994  const VarDecl *DestVD, const VarDecl *SrcVD,
2995  const Expr *Copy);
2996  /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
2997  /// \a X = \a E \a BO \a E.
2998  ///
2999  /// \param X Value to be updated.
3000  /// \param E Update value.
3001  /// \param BO Binary operation for update operation.
3002  /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3003  /// expression, false otherwise.
3004  /// \param AO Atomic ordering of the generated atomic instructions.
3005  /// \param CommonGen Code generator for complex expressions that cannot be
3006  /// expressed through an atomicrmw instruction.
3007  /// \returns <true, OldAtomicValue> if a simple 'atomicrmw' instruction was
3008  /// generated, <false, RValue::get(nullptr)> otherwise.
3009  std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3010  LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3011  llvm::AtomicOrdering AO, SourceLocation Loc,
3012  const llvm::function_ref<RValue(RValue)> CommonGen);
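 /// Illustrative usage sketch only: XLValue, UpdateRVal and Loc are placeholder
 /// names; the lambda is the CommonGen fallback used when the update cannot be
 /// lowered to a single atomicrmw.
 /// \code
 ///   std::pair<bool, RValue> Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
 ///       XLValue, UpdateRVal, BO_Add, /*IsXLHSInRHSPart=*/true,
 ///       llvm::AtomicOrdering::Monotonic, Loc,
 ///       [](RValue XRValue) {
 ///         // build the full update expression from the old value here
 ///         return XRValue;
 ///       });
 ///   // Res.first is false when a compare-exchange loop had to be emitted.
 /// \endcode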
3013  bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
3014  OMPPrivateScope &PrivateScope);
3015  void EmitOMPPrivateClause(const OMPExecutableDirective &D,
3016  OMPPrivateScope &PrivateScope);
3017  void EmitOMPUseDevicePtrClause(
3018  const OMPClause &C, OMPPrivateScope &PrivateScope,
3019  const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
3020  /// Emit code for the copyin clause in \a D directive. The following code is
3021  /// generated at the start of outlined functions for directives:
3022  /// \code
3023  /// threadprivate_var1 = master_threadprivate_var1;
3024  /// operator=(threadprivate_var2, master_threadprivate_var2);
3025  /// ...
3026  /// __kmpc_barrier(&loc, global_tid);
3027  /// \endcode
3028  ///
3029  /// \param D OpenMP directive possibly with 'copyin' clause(s).
3030  /// \returns true if at least one copyin variable is found, false otherwise.
3031  bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
3032  /// Emit initial code for lastprivate variables. If some variable is
3033  /// not also firstprivate, then the default initialization is used. Otherwise
3034  /// initialization of this variable is performed by the EmitOMPFirstprivateClause
3035  /// method.
3036  ///
3037  /// \param D Directive that may have 'lastprivate' clauses.
3038  /// \param PrivateScope Private scope for capturing lastprivate variables for
3039  /// proper codegen in internal captured statement.
3040  ///
3041  /// \returns true if there is at least one lastprivate variable, false
3042  /// otherwise.
3043  bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
3044  OMPPrivateScope &PrivateScope);
3045  /// Emit final copying of lastprivate values to original variables at
3046  /// the end of the worksharing or simd directive.
3047  ///
3048  /// \param D Directive that has at least one 'lastprivate' clause.
3049  /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3050  /// it is the last iteration of the loop code in associated directive, or to
3051  /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3052  void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
3053  bool NoFinals,
3054  llvm::Value *IsLastIterCond = nullptr);
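 /// Illustrative ordering sketch for the two lastprivate helpers above,
 /// assuming an OMPPrivateScope constructed from the current function as done
 /// elsewhere in this class; D and IsLastIterCond are placeholder names.
 /// \code
 ///   CodeGenFunction::OMPPrivateScope LoopScope(CGF);
 ///   bool HasLastprivates = CGF.EmitOMPLastprivateClauseInit(D, LoopScope);
 ///   // ... emit the loop body ...
 ///   if (HasLastprivates)
 ///     CGF.EmitOMPLastprivateClauseFinal(D, /*NoFinals=*/false, IsLastIterCond);
 /// \endcode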
3055  /// Emit initial code for linear clauses.
3056  void EmitOMPLinearClause(const OMPLoopDirective &D,
3057  CodeGenFunction::OMPPrivateScope &PrivateScope);
3058  /// Emit final code for linear clauses.
3059  /// \param CondGen Optional conditional code for final part of codegen for
3060  /// linear clause.
3061  void EmitOMPLinearClauseFinal(
3062  const OMPLoopDirective &D,
3063  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3064  /// Emit initial code for reduction variables. Creates reduction copies
3065  /// and initializes them with the values according to OpenMP standard.
3066  ///
3067  /// \param D Directive (possibly) with the 'reduction' clause.
3068  /// \param PrivateScope Private scope for capturing reduction variables for
3069  /// proper codegen in internal captured statement.
3070  ///
3071  void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
3072  OMPPrivateScope &PrivateScope);
3073  /// Emit final update of reduction values to original variables at
3074  /// the end of the directive.
3075  ///
3076  /// \param D Directive that has at least one 'reduction' clause.
3077  /// \param ReductionKind The kind of reduction to perform.
3078  void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
3079  const OpenMPDirectiveKind ReductionKind);
3080  /// Emit initial code for linear variables. Creates private copies
3081  /// and initializes them with the values according to OpenMP standard.
3082  ///
3083  /// \param D Directive (possibly) with the 'linear' clause.
3084  /// \return true if at least one linear variable is found that should be
3085  /// initialized with the value of the original variable, false otherwise.
3086  bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
3087 
3088  typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3089  llvm::Value * /*OutlinedFn*/,
3090  const OMPTaskDataTy & /*Data*/)>
3091  TaskGenTy;
3092  void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
3093  const OpenMPDirectiveKind CapturedRegion,
3094  const RegionCodeGenTy &BodyGen,
3095  const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3096  struct OMPTargetDataInfo {
3097  Address BasePointersArray = Address::invalid();
3098  Address PointersArray = Address::invalid();
3099  Address SizesArray = Address::invalid();
3100  unsigned NumberOfTargetItems = 0;
3101  explicit OMPTargetDataInfo() = default;
3102  OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
3103  Address SizesArray, unsigned NumberOfTargetItems)
3104  : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
3105  SizesArray(SizesArray), NumberOfTargetItems(NumberOfTargetItems) {}
3106  };
3107  void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
3108  const RegionCodeGenTy &BodyGen,
3109  OMPTargetDataInfo &InputInfo);
3110 
3111  void EmitOMPParallelDirective(const OMPParallelDirective &S);
3112  void EmitOMPSimdDirective(const OMPSimdDirective &S);
3113  void EmitOMPForDirective(const OMPForDirective &S);
3114  void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
3115  void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
3116  void EmitOMPSectionDirective(const OMPSectionDirective &S);
3117  void EmitOMPSingleDirective(const OMPSingleDirective &S);
3118  void EmitOMPMasterDirective(const OMPMasterDirective &S);
3119  void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
3120  void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
3121  void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
3122  void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
3123  void EmitOMPTaskDirective(const OMPTaskDirective &S);
3124  void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
3125  void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
3126  void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
3127  void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
3128  void EmitOMPFlushDirective(const OMPFlushDirective &S);
3129  void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
3130  void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
3131  void EmitOMPTargetDirective(const OMPTargetDirective &S);
3132  void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
3133  void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S);
3134  void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S);
3135  void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S);
3136  void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S);
3137  void
3138  EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S);
3139  void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
3140  void
3141  EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
3142  void EmitOMPCancelDirective(const OMPCancelDirective &S);
3143  void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S);
3144  void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
3145  void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
3146  void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
3147  void EmitOMPDistributeParallelForDirective(
3148  const OMPDistributeParallelForDirective &S);
3149  void EmitOMPDistributeParallelForSimdDirective(
3150  const OMPDistributeParallelForSimdDirective &S);
3151  void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
3152  void EmitOMPTargetParallelForSimdDirective(
3153  const OMPTargetParallelForSimdDirective &S);
3154  void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
3155  void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
3156  void
3157  EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
3158  void EmitOMPTeamsDistributeParallelForSimdDirective(
3159  const OMPTeamsDistributeParallelForSimdDirective &S);
3160  void EmitOMPTeamsDistributeParallelForDirective(
3161  const OMPTeamsDistributeParallelForDirective &S);
3162  void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
3163  void EmitOMPTargetTeamsDistributeDirective(
3164  const OMPTargetTeamsDistributeDirective &S);
3165  void EmitOMPTargetTeamsDistributeParallelForDirective(
3166  const OMPTargetTeamsDistributeParallelForDirective &S);
3167  void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
3168  const OMPTargetTeamsDistributeParallelForSimdDirective &S);
3169  void EmitOMPTargetTeamsDistributeSimdDirective(
3170  const OMPTargetTeamsDistributeSimdDirective &S);
3171 
3172  /// Emit device code for the target directive.
3173  static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
3174  StringRef ParentName,
3175  const OMPTargetDirective &S);
3176  static void
3177  EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3178  const OMPTargetParallelDirective &S);
3179  /// Emit device code for the target parallel for directive.
3180  static void EmitOMPTargetParallelForDeviceFunction(
3181  CodeGenModule &CGM, StringRef ParentName,
3182  const OMPTargetParallelForDirective &S);
3183  /// Emit device code for the target parallel for simd directive.
3184  static void EmitOMPTargetParallelForSimdDeviceFunction(
3185  CodeGenModule &CGM, StringRef ParentName,
3186  const OMPTargetParallelForSimdDirective &S);
3187  /// Emit device code for the target teams directive.
3188  static void
3189  EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3190  const OMPTargetTeamsDirective &S);
3191  /// Emit device code for the target teams distribute directive.
3192  static void EmitOMPTargetTeamsDistributeDeviceFunction(
3193  CodeGenModule &CGM, StringRef ParentName,
3194  const OMPTargetTeamsDistributeDirective &S);
3195  /// Emit device code for the target teams distribute simd directive.
3196  static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
3197  CodeGenModule &CGM, StringRef ParentName,
3198  const OMPTargetTeamsDistributeSimdDirective &S);
3199  /// Emit device code for the target simd directive.
3200  static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
3201  StringRef ParentName,
3202  const OMPTargetSimdDirective &S);
3203  /// Emit device code for the target teams distribute parallel for simd
3204  /// directive.
3205  static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
3206  CodeGenModule &CGM, StringRef ParentName,
3207  const OMPTargetTeamsDistributeParallelForSimdDirective &S);
3208 
3209  static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
3210  CodeGenModule &CGM, StringRef ParentName,
3211  const OMPTargetTeamsDistributeParallelForDirective &S);
3212  /// Emit inner loop of the worksharing/simd construct.
3213  ///
3214  /// \param S Directive, for which the inner loop must be emitted.
3215  /// \param RequiresCleanup true, if the directive has some associated private
3216  /// variables.
3217  /// \param LoopCond Boolean condition for loop continuation.
3218  /// \param IncExpr Increment expression for loop control variable.
3219  /// \param BodyGen Generator for the inner body of the inner loop.
3220  /// \param PostIncGen Generator for post-increment code (required for ordered
3221  /// loop directives).
3222  void EmitOMPInnerLoop(
3223  const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
3224  const Expr *IncExpr,
3225  const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
3226  const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
3227 
3228  JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
3229  /// Emit initial code for loop counters of loop-based directives.
3230  void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
3231  OMPPrivateScope &LoopScope);
3232 
3233  /// Helper for the OpenMP loop directives.
3234  void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
3235 
3236  /// Emit code for the worksharing loop-based directive.
3237  /// \return true, if this construct has any lastprivate clause, false -
3238  /// otherwise.
3239  bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
3240  const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3241  const CodeGenDispatchBoundsTy &CGDispatchBounds);
3242 
3243  /// Emit code for the distribute loop-based directive.
3244  void EmitOMPDistributeLoop(const OMPLoopDirective &S,
3245  const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
3246 
3247  /// Helpers for the OpenMP loop directives.
3248  void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
3249  void EmitOMPSimdFinal(
3250  const OMPLoopDirective &D,
3251  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3252 
3253  /// Emits the lvalue for the expression with possibly captured variable.
3254  LValue EmitOMPSharedLValue(const Expr *E);
3255 
3256 private:
3257  /// Helpers for blocks.
3258  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
3259 
3260  /// struct with the values to be passed to the OpenMP loop-related functions
3261  struct OMPLoopArguments {
3262  /// loop lower bound
3263  Address LB = Address::invalid();
3264  /// loop upper bound
3265  Address UB = Address::invalid();
3266  /// loop stride
3267  Address ST = Address::invalid();
3268  /// isLastIteration argument for runtime functions
3269  Address IL = Address::invalid();
3270  /// Chunk value generated by sema
3271  llvm::Value *Chunk = nullptr;
3272  /// EnsureUpperBound
3273  Expr *EUB = nullptr;
3274  /// IncrementExpression
3275  Expr *IncExpr = nullptr;
3276  /// Loop initialization
3277  Expr *Init = nullptr;
3278  /// Loop exit condition
3279  Expr *Cond = nullptr;
3280  /// Update of LB after a whole chunk has been executed
3281  Expr *NextLB = nullptr;
3282  /// Update of UB after a whole chunk has been executed
3283  Expr *NextUB = nullptr;
3284  OMPLoopArguments() = default;
3285  OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
3286  llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
3287  Expr *IncExpr = nullptr, Expr *Init = nullptr,
3288  Expr *Cond = nullptr, Expr *NextLB = nullptr,
3289  Expr *NextUB = nullptr)
3290  : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
3291  IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
3292  NextUB(NextUB) {}
3293  };
3294  void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
3295  const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
3296  const OMPLoopArguments &LoopArgs,
3297  const CodeGenLoopTy &CodeGenLoop,
3298  const CodeGenOrderedTy &CodeGenOrdered);
3299  void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
3300  bool IsMonotonic, const OMPLoopDirective &S,
3301  OMPPrivateScope &LoopScope, bool Ordered,
3302  const OMPLoopArguments &LoopArgs,
3303  const CodeGenDispatchBoundsTy &CGDispatchBounds);
3304  void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
3305  const OMPLoopDirective &S,
3306  OMPPrivateScope &LoopScope,
3307  const OMPLoopArguments &LoopArgs,
3308  const CodeGenLoopTy &CodeGenLoopContent);
3309  /// Emit code for sections directive.
3310  void EmitSections(const OMPExecutableDirective &S);
3311 
3312 public:
3313 
3314  //===--------------------------------------------------------------------===//
3315  // LValue Expression Emission
3316  //===--------------------------------------------------------------------===//
3317 
3318  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
3319  RValue GetUndefRValue(QualType Ty);
3320 
3321  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
3322  /// and issue an ErrorUnsupported style diagnostic (using the
3323  /// provided Name).
3324  RValue EmitUnsupportedRValue(const Expr *E,
3325  const char *Name);
3326 
3327  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
3328  /// an ErrorUnsupported style diagnostic (using the provided Name).
3329  LValue EmitUnsupportedLValue(const Expr *E,
3330  const char *Name);
3331 
3332  /// EmitLValue - Emit code to compute a designator that specifies the location
3333  /// of the expression.
3334  ///
3335  /// This can return one of two things: a simple address or a bitfield
3336  /// reference. In either case, the LLVM Value* in the LValue structure is
3337  /// guaranteed to be an LLVM pointer type.
3338  ///
3339  /// If this returns a bitfield reference, nothing about the pointee type of
3340  /// the LLVM value is known: For example, it may not be a pointer to an
3341  /// integer.
3342  ///
3343  /// If this returns a normal address, and if the lvalue's C type is fixed
3344  /// size, this method guarantees that the returned pointer type will point to
3345  /// an LLVM type of the same size as the lvalue's type. If the lvalue has a
3346  /// variable length type, this is not possible.
3347  ///
3348  LValue EmitLValue(const Expr *E);
3349 
3350  /// Same as EmitLValue but additionally we generate checking code to
3351  /// guard against undefined behavior. This is only suitable when we know
3352  /// that the address will be used to access the object.
3353  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
3354 
3355  RValue convertTempToRValue(Address addr, QualType type,
3356  SourceLocation Loc);
3357 
3358  void EmitAtomicInit(Expr *E, LValue lvalue);
3359 
3360  bool LValueIsSuitableForInlineAtomic(LValue Src);
3361 
3362  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
3363  AggValueSlot Slot = AggValueSlot::ignored());
3364 
3365  RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
3366  llvm::AtomicOrdering AO, bool IsVolatile = false,
3367  AggValueSlot slot = AggValueSlot::ignored());
3368 
3369  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
3370 
3371  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
3372  bool IsVolatile, bool isInit);
3373 
3374  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
3375  LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
3376  llvm::AtomicOrdering Success =
3377  llvm::AtomicOrdering::SequentiallyConsistent,
3378  llvm::AtomicOrdering Failure =
3379  llvm::AtomicOrdering::SequentiallyConsistent,
3380  bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
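 /// Illustrative usage sketch only: ObjLV, ExpectedRV, DesiredRV and Loc are
 /// placeholders for values the caller already has.
 /// \code
 ///   std::pair<RValue, llvm::Value *> Res =
 ///       CGF.EmitAtomicCompareExchange(ObjLV, ExpectedRV, DesiredRV, Loc);
 ///   RValue Old = Res.first;             // value observed in memory
 ///   llvm::Value *Success = Res.second;  // flag: the exchange happened
 /// \endcode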
3381 
3382  void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
3383  const llvm::function_ref<RValue(RValue)> &UpdateOp,
3384  bool IsVolatile);
3385 
3386  /// EmitToMemory - Change a scalar value from its value
3387  /// representation to its in-memory representation.
3388  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
3389 
3390  /// EmitFromMemory - Change a scalar value from its memory
3391  /// representation to its value representation.
3392  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
3393 
3394  /// Check if the scalar \p Value is within the valid range for the given
3395  /// type \p Ty.
3396  ///
3397  /// Returns true if a check is needed (even if the range is unknown).
3398  bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
3399  SourceLocation Loc);
3400 
3401  /// EmitLoadOfScalar - Load a scalar value from an address, taking
3402  /// care to appropriately convert from the memory representation to
3403  /// the LLVM value representation.
3404  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
3405  SourceLocation Loc,
3406  AlignmentSource Source = AlignmentSource::Type,
3407  bool isNontemporal = false) {
3408  return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
3409  CGM.getTBAAAccessInfo(Ty), isNontemporal);
3410  }
3411 
3412  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
3413  SourceLocation Loc, LValueBaseInfo BaseInfo,
3414  TBAAAccessInfo TBAAInfo,
3415  bool isNontemporal = false);
3416 
3417  /// EmitLoadOfScalar - Load a scalar value from an address, taking
3418  /// care to appropriately convert from the memory representation to
3419  /// the LLVM value representation. The l-value must be a simple
3420  /// l-value.
3421  llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);
3422 
3423  /// EmitStoreOfScalar - Store a scalar value to an address, taking
3424  /// care to appropriately convert from the memory representation to
3425  /// the LLVM value representation.
3427  bool Volatile, QualType Ty,
3429  bool isInit = false, bool isNontemporal = false) {
3430  EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
3431  CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
3432  }
3433 
3434  void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
3435  bool Volatile, QualType Ty,
3436  LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
3437  bool isInit = false, bool isNontemporal = false);
3438 
3439  /// EmitStoreOfScalar - Store a scalar value to an address, taking
3440  /// care to appropriately convert from the memory representation to
3441  /// the LLVM value representation. The l-value must be a simple
3442  /// l-value. The isInit flag indicates whether this is an initialization.
3443  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
3444  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
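 /// Illustrative round-trip sketch using the simple l-value overloads above,
 /// assuming both l-values have the same scalar type; SrcLV, DstLV and Loc are
 /// placeholder names.
 /// \code
 ///   llvm::Value *V = CGF.EmitLoadOfScalar(SrcLV, Loc); // memory -> value repr.
 ///   CGF.EmitStoreOfScalar(V, DstLV);                   // value -> memory repr.
 /// \endcode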
3445 
3446  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
3447  /// this method emits the address of the lvalue, then loads the result as an
3448  /// rvalue, returning the rvalue.
3449  RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
3450  RValue EmitLoadOfExtVectorElementLValue(LValue V);
3451  RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
3452  RValue EmitLoadOfGlobalRegLValue(LValue LV);
3453 
3454  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
3455  /// lvalue, where both are guaranteed to have the same type, and that type
3456  /// is 'Ty'.
3457  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
3458  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
3459  void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
3460 
3461  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
3462  /// as EmitStoreThroughLValue.
3463  ///
3464  /// \param Result [out] - If non-null, this will be set to a Value* for the
3465  /// bit-field contents after the store, appropriate for use as the result of
3466  /// an assignment to the bit-field.
3467  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
3468  llvm::Value **Result=nullptr);
3469 
3470  /// Emit an l-value for an assignment (simple or compound) of complex type.
3471  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
3472  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
3473  LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
3474  llvm::Value *&Result);
3475 
3476  // Note: only available for agg return types
3477  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
3478  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
3479  // Note: only available for agg return types
3480  LValue EmitCallExprLValue(const CallExpr *E);
3481  // Note: only available for agg return types
3482  LValue EmitVAArgExprLValue(const VAArgExpr *E);
3483  LValue EmitDeclRefLValue(const DeclRefExpr *E);
3484  LValue EmitStringLiteralLValue(const StringLiteral *E);
3485  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
3486  LValue EmitPredefinedLValue(const PredefinedExpr *E);
3487  LValue EmitUnaryOpLValue(const UnaryOperator *E);
3488  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3489  bool Accessed = false);
3490  LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
3491  bool IsLowerBound = true);
3492  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
3493  LValue EmitMemberExpr(const MemberExpr *E);
3494  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
3495  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
3496  LValue EmitInitListLValue(const InitListExpr *E);
3497  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
3498  LValue EmitCastLValue(const CastExpr *E);
3499  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
3500  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
3501 
3502  Address EmitExtVectorElementLValue(LValue V);
3503 
3504  RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
3505 
3506  Address EmitArrayToPointerDecay(const Expr *Array,
3507  LValueBaseInfo *BaseInfo = nullptr,
3508  TBAAAccessInfo *TBAAInfo = nullptr);
3509 
3510  class ConstantEmission {
3511  llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
3512  ConstantEmission(llvm::Constant *C, bool isReference)
3513  : ValueAndIsReference(C, isReference) {}
3514  public:
3515  ConstantEmission() {}
3516  static ConstantEmission forReference(llvm::Constant *C) {
3517  return ConstantEmission(C, true);
3518  }
3519  static ConstantEmission forValue(llvm::Constant *C) {
3520  return ConstantEmission(C, false);
3521  }
3522 
3523  explicit operator bool() const {
3524  return ValueAndIsReference.getOpaqueValue() != nullptr;
3525  }
3526 
3527  bool isReference() const { return ValueAndIsReference.getInt(); }
3528  LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
3529  assert(isReference());
3530  return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
3531  refExpr->getType());
3532  }
3533 
3534  llvm::Constant *getValue() const {
3535  assert(!isReference());
3536  return ValueAndIsReference.getPointer();
3537  }
3538  };
3539 
3540  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
3541  ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
3542  llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
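 /// Illustrative usage sketch for ConstantEmission; RefExpr is a placeholder
 /// for the DeclRefExpr the caller is emitting.
 /// \code
 ///   if (CodeGenFunction::ConstantEmission CE = CGF.tryEmitAsConstant(RefExpr)) {
 ///     if (CE.isReference()) {
 ///       LValue LV = CE.getReferenceLValue(CGF, RefExpr);
 ///     } else {
 ///       llvm::Value *V = CGF.emitScalarConstant(CE, RefExpr);
 ///     }
 ///   }
 /// \endcode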
3543 
3544  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
3545  AggValueSlot slot = AggValueSlot::ignored());
3546  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
3547 
3548  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
3549  const ObjCIvarDecl *Ivar);
3550  LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
3551  LValue EmitLValueForLambdaField(const FieldDecl *Field);
3552 
3553  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
3554  /// if the Field is a reference, this will return the address of the reference
3555  /// and not the address of the value stored in the reference.
3556  LValue EmitLValueForFieldInitialization(LValue Base,
3557  const FieldDecl* Field);
3558 
3559  LValue EmitLValueForIvar(QualType ObjectTy,
3560  llvm::Value* Base, const ObjCIvarDecl *Ivar,
3561  unsigned CVRQualifiers);
3562 
3563  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
3564  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
3565  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
3566  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
3567 
3568  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
3569  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
3570  LValue EmitStmtExprLValue(const StmtExpr *E);
3571  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
3572  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
3573  void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
3574 
3575  //===--------------------------------------------------------------------===//
3576  // Scalar Expression Emission
3577  //===--------------------------------------------------------------------===//
3578 
3579  /// EmitCall - Generate a call of the given function, expecting the given
3580  /// result type, and using the given argument list which specifies both the
3581  /// LLVM arguments and the types they were derived from.
3582  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
3583  ReturnValueSlot ReturnValue, const CallArgList &Args,
3584  llvm::Instruction **callOrInvoke, SourceLocation Loc);
3585  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
3586  ReturnValueSlot ReturnValue, const CallArgList &Args,
3587  llvm::Instruction **callOrInvoke = nullptr) {
3588  return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
3589  SourceLocation());
3590  }
3591  RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
3592  ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr);
3593  RValue EmitCallExpr(const CallExpr *E,
3594  ReturnValueSlot ReturnValue = ReturnValueSlot());
3595  RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
3596  CGCallee EmitCallee(const Expr *E);
3597 
3598  void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
3599 
3600  llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
3601  const Twine &name = "");
3602  llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
3603  ArrayRef<llvm::Value*> args,
3604  const Twine &name = "");
3605  llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
3606  const Twine &name = "");
3607  llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
3608  ArrayRef<llvm::Value*> args,
3609  const Twine &name = "");
3610 
3611  SmallVector<llvm::OperandBundleDef, 1>
3612  getBundlesForFunclet(llvm::Value *Callee);
3613 
3614  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
3615  ArrayRef<llvm::Value *> Args = None,
3616  const Twine &Name = "");
3617  llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
3618  ArrayRef<llvm::Value*> args,
3619  const Twine &name = "");
3620  llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
3621  const Twine &name = "");
3622  void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3623  ArrayRef<llvm::Value*> args);
3624 
3625  llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
3626  NestedNameSpecifier *Qual,
3627  llvm::Type *Ty);
3628 
3629  CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
3630  CXXDtorType Type,
3631  const CXXRecordDecl *RD);
3632 
3633  // Return the copy constructor name with the prefix "__copy_constructor_"
3634  // removed.
3635  static std::string getNonTrivialCopyConstructorStr(QualType QT,
3636  CharUnits Alignment,
3637  bool IsVolatile,
3638  ASTContext &Ctx);
3639 
3640  // Return the destructor name with the prefix "__destructor_" removed.
3641  static std::string getNonTrivialDestructorStr(QualType QT,
3642  CharUnits Alignment,
3643  bool IsVolatile,
3644  ASTContext &Ctx);
3645 
3646  // These functions emit calls to the special functions of non-trivial C
3647  // structs.
3648  void defaultInitNonTrivialCStructVar(LValue Dst);
3649  void callCStructDefaultConstructor(LValue Dst);
3650  void callCStructDestructor(LValue Dst);
3651  void callCStructCopyConstructor(LValue Dst, LValue Src);
3652  void callCStructMoveConstructor(LValue Dst, LValue Src);
3653  void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
3654  void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
3655 
3656  RValue
3657  EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method,
3658  const CGCallee &Callee,
3659  ReturnValueSlot ReturnValue, llvm::Value *This,
3660  llvm::Value *ImplicitParam,
3661  QualType ImplicitParamTy, const CallExpr *E,
3662  CallArgList *RtlArgs);
3663  RValue EmitCXXDestructorCall(const CXXDestructorDecl *DD,
3664  const CGCallee &Callee,
3665  llvm::Value *This, llvm::Value *ImplicitParam,
3666  QualType ImplicitParamTy, const CallExpr *E,
3667  StructorType Type);
3668  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
3669  ReturnValueSlot ReturnValue);
3670  RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE,
3671  const CXXMethodDecl *MD,
3672  ReturnValueSlot ReturnValue,
3673  bool HasQualifier,
3674  NestedNameSpecifier *Qualifier,
3675  bool IsArrow, const Expr *Base);
3676  // Compute the object pointer.
3677  Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
3678  llvm::Value *memberPtr,
3679  const MemberPointerType *memberPtrType,
3680  LValueBaseInfo *BaseInfo = nullptr,
3681  TBAAAccessInfo *TBAAInfo = nullptr);
3682  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
3683  ReturnValueSlot ReturnValue);
3684 
3685  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
3686  const CXXMethodDecl *MD,
3687  ReturnValueSlot ReturnValue);
3688  RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);
3689 
3690  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
3691  ReturnValueSlot ReturnValue);
3692 
3693  RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
3694  ReturnValueSlot ReturnValue);
3695 
3696  RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
3697  const CallExpr *E, ReturnValueSlot ReturnValue);
3698 
3699  RValue emitRotate(const CallExpr *E, bool IsRotateRight);
3700 
3701  /// Emit IR for __builtin_os_log_format.
3702  RValue emitBuiltinOSLogFormat(const CallExpr &E);
3703 
3704  llvm::Function *generateBuiltinOSLogHelperFunction(
3705  const analyze_os_log::OSLogBufferLayout &Layout,
3706  CharUnits BufferAlignment);
3707 
3708  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
3709 
3710  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
3711  /// is unhandled by the current target.
3712  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3713 
3714  llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
3715  const llvm::CmpInst::Predicate Fp,
3716  const llvm::CmpInst::Predicate Ip,
3717  const llvm::Twine &Name = "");
3718  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
3719  llvm::Triple::ArchType Arch);
3720 
3721  llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
3722  unsigned LLVMIntrinsic,
3723  unsigned AltLLVMIntrinsic,
3724  const char *NameHint,
3725  unsigned Modifier,
3726  const CallExpr *E,
3727  SmallVectorImpl<llvm::Value *> &Ops,
3728  Address PtrOp0, Address PtrOp1,
3729  llvm::Triple::ArchType Arch);
3730 
3731  llvm::Value *EmitISOVolatileLoad(const CallExpr *E);
3732  llvm::Value *EmitISOVolatileStore(const CallExpr *E);
3733 
3734  llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
3735  unsigned Modifier, llvm::Type *ArgTy,
3736  const CallExpr *E);
3737  llvm::Value *EmitNeonCall(llvm::Function *F,
3738  SmallVectorImpl<llvm::Value*> &O,
3739  const char *name,
3740  unsigned shift = 0, bool rightshift = false);
3741  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
3742  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
3743  bool negateForRightShift);
3744  llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
3745  llvm::Type *Ty, bool usgn, const char *name);
3746  llvm::Value *vectorWrapScalar16(llvm::Value *Op);
3747  llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
3748  llvm::Triple::ArchType Arch);
3749 
3750  llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
3751  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3752  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3753  llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3754  llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3755  llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3756  llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
3757  const CallExpr *E);
3758  llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3759 
3760 private:
3761  enum class MSVCIntrin;
3762 
3763 public:
3764  llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
3765 
3766  llvm::Value *EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args);
3767 
3768  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
3769  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
3770  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
3771  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
3772  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
3773  llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
3774  const ObjCMethodDecl *MethodWithObjects);
3775  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
3776  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
3777  ReturnValueSlot Return = ReturnValueSlot());
3778 
3779  /// Retrieves the default cleanup kind for an ARC cleanup.
3780  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
3781  CleanupKind getARCCleanupKind() {
3782  return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
3783  ? NormalAndEHCleanup : NormalCleanup;
3784  }
3785 
3786  // ARC primitives.
3787  void EmitARCInitWeak(Address addr, llvm::Value *value);
3788  void EmitARCDestroyWeak(Address addr);
3789  llvm::Value *EmitARCLoadWeak(Address addr);
3790  llvm::Value *EmitARCLoadWeakRetained(Address addr);
3791  llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
3792  void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
3793  void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
3794  void EmitARCCopyWeak(Address dst, Address src);
3795  void EmitARCMoveWeak(Address dst, Address src);
3796  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
3797  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
3798  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
3799  bool resultIgnored);
3800  llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
3801  bool resultIgnored);
3802  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
3803  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
3804  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
3805  void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
3806  void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
3807  llvm::Value *EmitARCAutorelease(llvm::Value *value);
3808  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
3809  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
3810  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
3811  llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
3812 
3813  llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
3814  llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
3815  llvm::Type *returnType);
3816  void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
3817 
3818  std::pair<LValue,llvm::Value*>
3819  EmitARCStoreAutoreleasing(const BinaryOperator *e);
3820  std::pair<LValue,llvm::Value*>
3821  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
3822  std::pair<LValue,llvm::Value*>
3823  EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);
3824 
3825  llvm::Value *EmitObjCAlloc(llvm::Value *value,
3826  llvm::Type *returnType);
3827  llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
3828  llvm::Type *returnType);
3829  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
3830  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
3831  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
3832 
3833  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
3834  llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
3835  bool allowUnsafeClaim);
3836  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
3837  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
3838  llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr);
3839 
3840  void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
3841 
3842  static Destroyer destroyARCStrongImprecise;
3843  static Destroyer destroyARCStrongPrecise;
3844  static Destroyer destroyARCWeak;
3845  static Destroyer emitARCIntrinsicUse;
3846  static Destroyer destroyNonTrivialCStruct;
3847 
3848  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
3849  llvm::Value *EmitObjCAutoreleasePoolPush();
3850  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
3851  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
3852  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
3853 
3854  /// Emits a reference binding to the passed in expression.
3855  RValue EmitReferenceBindingToExpr(const Expr *E);
3856 
3857  //===--------------------------------------------------------------------===//
3858  // Expression Emission
3859  //===--------------------------------------------------------------------===//
3860 
3861  // Expressions are broken into three classes: scalar, complex, aggregate.
3862 
3863  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
3864  /// scalar type, returning the result.
3865  llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
3866 
3867  /// Emit a conversion from the specified type to the specified destination
3868  /// type, both of which are LLVM scalar types.
3869  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
3870  QualType DstTy, SourceLocation Loc);
3871 
3872  /// Emit a conversion from the specified complex type to the specified
3873  /// destination type, where the destination type is an LLVM scalar type.
3874  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
3875  QualType DstTy,
3876  SourceLocation Loc);
3877 
3878  /// EmitAggExpr - Emit the computation of the specified expression
3879  /// of aggregate type. The result is computed into the given slot,
3880  /// which may be null to indicate that the value is not needed.
3881  void EmitAggExpr(const Expr *E, AggValueSlot AS);
3882 
3883  /// EmitAggExprToLValue - Emit the computation of the specified expression of
3884  /// aggregate type into a temporary LValue.
3885  LValue EmitAggExprToLValue(const Expr *E);
3886 
3887  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
3888  /// make sure it survives garbage collection until this point.
3889  void EmitExtendGCLifetime(llvm::Value *object);
3890 
3891  /// EmitComplexExpr - Emit the computation of the specified expression of
3892  /// complex type, returning the result.
3893  ComplexPairTy EmitComplexExpr(const Expr *E,
3894  bool IgnoreReal = false,
3895  bool IgnoreImag = false);
3896 
3897  /// EmitComplexExprIntoLValue - Emit the given expression of complex
3898  /// type and place its result into the specified l-value.
3899  void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
3900 
3901  /// EmitStoreOfComplex - Store a complex number into the specified l-value.
3902  void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
3903 
3904  /// EmitLoadOfComplex - Load a complex number from the specified l-value.
3905  ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
3906 
3907  Address emitAddrOfRealComponent(Address complex, QualType complexType);
3908  Address emitAddrOfImagComponent(Address complex, QualType complexType);
3909 
3910  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
3911  /// global variable that has already been created for it. If the initializer
3912  /// has a different type than GV does, this may free GV and return a different
3913  /// one. Otherwise it just returns GV.
3914  llvm::GlobalVariable *
3915  AddInitializerToStaticVarDecl(const VarDecl &D,
3916  llvm::GlobalVariable *GV);
3917 
3918  // Emit an @llvm.invariant.start call for the given memory region.
3919  void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
3920 
3921  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
3922  /// variable with global storage.
3923  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
3924  bool PerformInit);
3925 
3926  llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::Constant *Dtor,
3927  llvm::Constant *Addr);
3928 
3929  /// Call atexit() with a function that passes the given argument to
3930  /// the given function.
3931  void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::Constant *fn,
3932  llvm::Constant *addr);
3933 
3934  /// Call atexit() with function dtorStub.
3935  void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
3936 
3937  /// Emit code in this function to perform a guarded variable
3938  /// initialization. Guarded initializations are used when it's not
3939  /// possible to prove that an initialization will be done exactly
3940  /// once, e.g. with a static local variable or a static data member
3941  /// of a class template.
3942  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
3943  bool PerformInit);
3944 
3945  enum class GuardKind { VariableGuard, TlsGuard };
3946 
3947  /// Emit a branch to select whether or not to perform guarded initialization.
3948  void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
3949  llvm::BasicBlock *InitBlock,
3950  llvm::BasicBlock *NoInitBlock,
3951  GuardKind Kind, const VarDecl *D);
3952 
3953  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
3954  /// variables.
3955  void
3956  GenerateCXXGlobalInitFunc(llvm::Function *Fn,
3957  ArrayRef<llvm::Function *> CXXThreadLocals,
3958  ConstantAddress Guard = ConstantAddress::invalid());
3959 
3960  /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
3961  /// variables.
3962  void GenerateCXXGlobalDtorsFunc(
3963  llvm::Function *Fn,
3964  const std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>>
3965  &DtorsAndObjects);
3966 
3967  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
3968  const VarDecl *D,
3969  llvm::GlobalVariable *Addr,
3970  bool PerformInit);
3971 
3972  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
3973 
3974  void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
3975 
3976  void enterFullExpression(const FullExpr *E) {
3977  if (const auto *EWC = dyn_cast<ExprWithCleanups>(E))
3978  if (EWC->getNumObjects() == 0)
3979  return;
3980  enterNonTrivialFullExpression(E);
3981  }
3982  void enterNonTrivialFullExpression(const FullExpr *E);
3983 
3984  void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
3985 
3986  RValue EmitAtomicExpr(AtomicExpr *E);
3987 
3988  //===--------------------------------------------------------------------===//
3989  // Annotations Emission
3990  //===--------------------------------------------------------------------===//
3991 
3992  /// Emit an annotation call (intrinsic or builtin).
3993  llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
3994  llvm::Value *AnnotatedVal,
3995  StringRef AnnotationStr,
3996  SourceLocation Location);
3997 
3998  /// Emit local annotations for the local variable V, declared by D.
3999  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
4000 
4001  /// Emit field annotations for the given field & value. Returns the
4002  /// annotation result.
4003  Address EmitFieldAnnotations(const FieldDecl *D, Address V);
4004 
4005  //===--------------------------------------------------------------------===//
4006  // Internal Helpers
4007  //===--------------------------------------------------------------------===//
4008 
4009  /// ContainsLabel - Return true if the statement contains a label in it. If
4010  /// this statement is not executed normally, the absence of a label means
4011  /// that we can just remove the code.
4012  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
4013 
4014  /// containsBreak - Return true if the statement contains a break out of it.
4015  /// If the statement (recursively) contains a switch or loop with a break
4016  /// inside of it, this is fine.
4017  static bool containsBreak(const Stmt *S);
4018 
4019  /// Determine if the given statement might introduce a declaration into the
4020  /// current scope, by being a (possibly-labelled) DeclStmt.
4021  static bool mightAddDeclToScope(const Stmt *S);
4022 
4023  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
4024  /// to a constant, or if it does but contains a label, return false. If it
4025  /// constant folds return true and set the boolean result in Result.
4026  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result,
4027  bool AllowLabels = false);
4028 
4029  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
4030  /// to a constant, or if it does but contains a label, return false. If it
4031  /// constant folds return true and set the folded value.
4032  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
4033  bool AllowLabels = false);
4034 
4035  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
4036  /// if statement) to the specified blocks. Based on the condition, this might
4037  /// try to simplify the codegen of the conditional based on the branch.
4038  /// TrueCount should be the number of times we expect the condition to
4039  /// evaluate to true based on PGO data.
4040  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
4041  llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
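 /// Illustrative sketch of the usual folding pattern around the helpers above;
 /// Cond, ThenBlock, ElseBlock and TrueCount are placeholder names.
 /// \code
 ///   bool CondConstant;
 ///   if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
 ///     // emit only the branch selected by CondConstant
 ///   } else {
 ///     CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount);
 ///   }
 /// \endcode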
4042 
4043  /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
4044  /// nonnull, if \p LHS is marked _Nonnull.
4045  void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
4046 
4047  /// An enumeration which makes it easier to specify whether or not an
4048  /// operation is a subtraction.
4049  enum { NotSubtraction = false, IsSubtraction = true };
4050 
4051  /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
4052  /// detect undefined behavior when the pointer overflow sanitizer is enabled.
4053  /// \p SignedIndices indicates whether any of the GEP indices are signed.
4054  /// \p IsSubtraction indicates whether the expression used to form the GEP
4055  /// is a subtraction.
4056  llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
4057  ArrayRef<llvm::Value *> IdxList,
4058  bool SignedIndices,
4059  bool IsSubtraction,
4060  SourceLocation Loc,
4061  const Twine &Name = "");
4062 
4063  /// Specifies which type of sanitizer check to apply when handling a
4064  /// particular builtin.
4065  enum BuiltinCheckKind {
4066  BCK_CTZPassedZero,
4067  BCK_CLZPassedZero,
4068  };
4069 
4070  /// Emits an argument for a call to a builtin. If the builtin sanitizer is
4071  /// enabled, a runtime check specified by \p Kind is also emitted.
4072  llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
4073 
4074  /// Emit a description of a type in a format suitable for passing to
4075  /// a runtime sanitizer handler.
4076  llvm::Constant *EmitCheckTypeDescriptor(QualType T);
4077 
4078  /// Convert a value into a format suitable for passing to a runtime
4079  /// sanitizer handler.
4080  llvm::Value *EmitCheckValue(llvm::Value *V);
4081 
4082  /// Emit a description of a source location in a format suitable for
4083  /// passing to a runtime sanitizer handler.
4084  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
4085 
4086  /// Create a basic block that will call a handler function in a
4087  /// sanitizer runtime with the provided arguments, and create a conditional
4088  /// branch to it.
4089  void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
4090  SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
4091  ArrayRef<llvm::Value *> DynamicArgs);
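 /// Illustrative calling sketch only: Ok is a placeholder predicate that must
 /// hold, StaticArgs/DynamicArgs are placeholder argument lists, and the
 /// particular mask/handler enumerators are examples, not a prescription.
 /// \code
 ///   CGF.EmitCheck(std::make_pair(Ok, SanitizerKind::SignedIntegerOverflow),
 ///                 SanitizerHandler::AddOverflow, StaticArgs, DynamicArgs);
 /// \endcode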
4092 
4093  /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
4094  /// if Cond is false.
4095  void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond,
4096  llvm::ConstantInt *TypeId, llvm::Value *Ptr,
4097  ArrayRef<llvm::Constant *> StaticArgs);
4098 
4099  /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
4100  /// checking is enabled. Otherwise, just emit an unreachable instruction.
4101  void EmitUnreachable(SourceLocation Loc);
4102 
4103  /// Create a basic block that will call the trap intrinsic, and emit a
4104  /// conditional branch to it, for the -ftrapv checks.
4105  void EmitTrapCheck(llvm::Value *Checked);
4106 
4107  /// Emit a call to trap or debugtrap and attach function attribute
4108  /// "trap-func-name" if specified.
4109  llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
4110 
4111  /// Emit a stub for the cross-DSO CFI check function.
4112  void EmitCfiCheckStub();
4113 
4114  /// Emit a cross-DSO CFI failure handling function.
4115  void EmitCfiCheckFail();
4116 
4117  /// Create a check for a function parameter that may potentially be
4118  /// declared as non-null.
4119  void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
4120  AbstractCallee AC, unsigned ParmNum);
4121 
4122  /// EmitCallArg - Emit a single call argument.
4123  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
4124 
4125  /// EmitDelegateCallArg - We are performing a delegate call; that
4126  /// is, the current function is delegating to another one. Produce
4127  /// an r-value suitable for passing the given parameter.
4128  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
4129  SourceLocation loc);
4130 
4131  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
4132  /// point operation, expressed as the maximum relative error in ulp.
4133  void SetFPAccuracy(llvm::Value *Val, float Accuracy);
4134 
4135 private:
4136  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
4137  void EmitReturnOfRValue(RValue RV, QualType Ty);
4138 
4139  void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
4140 
4141  llvm::SmallVector<std::pair<llvm::Instruction *, llvm::Value *>, 4>
4142  DeferredReplacements;
4143 
4144  /// Set the address of a local variable.
4145  void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
4146  assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
4147  LocalDeclMap.insert({VD, Addr});
4148  }
4149 
4150  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
4151  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
4152  ///
4153  /// \param AI - The first function argument of the expansion.
4154  void ExpandTypeFromArgs(QualType Ty, LValue Dst,
4156 
4157  /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
4158  /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
4159  /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
4160  void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
4161  SmallVectorImpl<llvm::Value *> &IRCallArgs,
4162  unsigned &IRCallArgPos);
4163 
4164  llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
4165  const Expr *InputExpr, std::string &ConstraintStr);
4166 
4167  llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
4168  LValue InputValue, QualType InputType,
4169  std::string &ConstraintStr,
4170  SourceLocation Loc);
4171 
4172  /// Attempts to statically evaluate the object size of E. If that
4173  /// fails, emits code to compute the size of E at run time. This is
4174  /// aware of pass_object_size parameters.
4175  ///
4176  /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
4177  llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
4178  llvm::IntegerType *ResType,
4179  llvm::Value *EmittedE);
4180 
4181  /// Emits the size of E, as required by __builtin_object_size. This
4182  /// function is aware of pass_object_size parameters, and will act accordingly
4183  /// if E is a parameter with the pass_object_size attribute.
4184  llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
4185  llvm::IntegerType *ResType,
4186  llvm::Value *EmittedE);
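 // [Editor's sketch, not part of the original header] At the source level the
 // value these helpers compute matches __builtin_object_size, e.g.
 //
 //   char buf[16];
 //   unsigned long n = __builtin_object_size(buf, 0);  // constant-folds to 16
 //
 // and a pass_object_size parameter forwards that result into the callee.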
4187 
4188 public:
4189 #ifndef NDEBUG
4190  // Determine whether the given argument is an Objective-C method
4191  // that may have type parameters in its signature.
4192  static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4193  const DeclContext *dc = method->getDeclContext();
4194  if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
4195  return classDecl->getTypeParamListAsWritten();
4196  }
4197 
4198  if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4199  return catDecl->getTypeParamList();
4200  }
4201 
4202  return false;
4203  }
4204 
4205  template<typename T>
4206  static bool isObjCMethodWithTypeParams(const T *) { return false; }
4207 #endif
4208 
4209  enum class EvaluationOrder {
4210  ///! No language constraints on evaluation order.
4211  Default,
4212  ///! Language semantics require left-to-right evaluation.
4213  ForceLeftToRight,
4214  ///! Language semantics require right-to-left evaluation.
4215  ForceRightToLeft
4216  };
4217 
4218  /// EmitCallArgs - Emit call arguments for a function.
4219  template <typename T>
4220  void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
4221  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4222  AbstractCallee AC = AbstractCallee(),
4223  unsigned ParamsToSkip = 0,
4224  EvaluationOrder Order = EvaluationOrder::Default) {
4225  SmallVector<QualType, 16> ArgTypes;
4226  CallExpr::const_arg_iterator Arg = ArgRange.begin();
4227 
4228  assert((ParamsToSkip == 0 || CallArgTypeInfo) &&
4229  "Can't skip parameters if type info is not provided");
4230  if (CallArgTypeInfo) {
4231 #ifndef NDEBUG
4232  bool isGenericMethod = isObjCMethodWithTypeParams(CallArgTypeInfo);
4233 #endif
4234 
4235  // First, use the argument types that the type info knows about
4236  for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip,
4237  E = CallArgTypeInfo->param_type_end();
4238  I != E; ++I, ++Arg) {
4239  assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4240  assert((isGenericMethod ||
4241  ((*I)->isVariablyModifiedType() ||
4242  (*I).getNonReferenceType()->isObjCRetainableType() ||
4243  getContext()
4244  .getCanonicalType((*I).getNonReferenceType())
4245  .getTypePtr() ==
4246  getContext()
4247  .getCanonicalType((*Arg)->getType())
4248  .getTypePtr())) &&
4249  "type mismatch in call argument!");
4250  ArgTypes.push_back(*I);
4251  }
4252  }
4253 
4254  // Either we've emitted all the call args, or we have a call to a
4255  // variadic function.
4256  assert((Arg == ArgRange.end() || !CallArgTypeInfo ||
4257  CallArgTypeInfo->isVariadic()) &&
4258  "Extra arguments in non-variadic function!");
4259 
4260  // If we still have any arguments, emit them using the type of the argument.
4261  for (auto *A : llvm::make_range(Arg, ArgRange.end()))
4262  ArgTypes.push_back(CallArgTypeInfo ? getVarArgType(A) : A->getType());
4263 
4264  EmitCallArgs(Args, ArgTypes, ArgRange, AC, ParamsToSkip, Order);
4265  }
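 // [Editor's sketch, not part of the original header] Call sites elsewhere in
 // CodeGen use this template roughly as follows, where `CE` is a CallExpr and
 // `FPT` its FunctionProtoType (placeholder names here):
 //
 //   CallArgList Args;
 //   EmitCallArgs(Args, FPT, CE->arguments(), CE->getDirectCallee(),
 //                /*ParamsToSkip=*/0);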
4266 
4267  void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
4268  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4269  AbstractCallee AC = AbstractCallee(),
4270  unsigned ParamsToSkip = 0,
4271  EvaluationOrder Order = EvaluationOrder::Default);
4272 
4273  /// EmitPointerWithAlignment - Given an expression with a pointer type,
4274  /// emit the value and compute our best estimate of the alignment of the
4275  /// pointee.
4276  ///
4277  /// \param BaseInfo - If non-null, this will be initialized with
4278  /// information about the source of the alignment and the may-alias
4279  /// attribute. Note that this function will conservatively fall back on
4280  /// the type when it doesn't recognize the expression and may-alias will
4281  /// be set to false.
4282  ///
4283  /// One reasonable way to use this information is when there's a language
4284  /// guarantee that the pointer must be aligned to some stricter value, and
4285  /// we're simply trying to ensure that sufficiently obvious uses of under-
4286  /// aligned objects don't get miscompiled; for example, a placement new
4287  /// into the address of a local variable. In such a case, it's quite
4288  /// reasonable to just ignore the returned alignment when it isn't from an
4289  /// explicit source.
4290  Address EmitPointerWithAlignment(const Expr *Addr,
4291  LValueBaseInfo *BaseInfo = nullptr,
4292  TBAAAccessInfo *TBAAInfo = nullptr);
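 // [Editor's sketch, not part of the original header] A common pattern is to
 // turn a pointer-typed expression `E` (placeholder) into an l-value for its
 // pointee:
 //
 //   LValueBaseInfo BaseInfo;
 //   TBAAAccessInfo TBAAInfo;
 //   Address Addr = EmitPointerWithAlignment(E, &BaseInfo, &TBAAInfo);
 //   LValue LV = MakeAddrLValue(Addr, E->getType()->getPointeeType(),
 //                              BaseInfo, TBAAInfo);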
4293 
4294  /// If \p E references a parameter with pass_object_size info or a constant
4295  /// array size modifier, emit the object size divided by the size of \p EltTy.
4296  /// Otherwise return null.
4297  llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);
4298 
4299  void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
4300 
4301  struct MultiVersionResolverOption {
4302  llvm::Function *Function;
4304  struct Conds {
4305  StringRef Architecture;
4306  llvm::SmallVector<StringRef, 8> Features;
4307 
4308  Conds(StringRef Arch, ArrayRef<StringRef> Feats)
4309  : Architecture(Arch), Features(Feats.begin(), Feats.end()) {}
4310  } Conditions;
4311 
4312  MultiVersionResolverOption(llvm::Function *F, StringRef Arch,
4313  ArrayRef<StringRef> Feats)
4314  : Function(F), Conditions(Arch, Feats) {}
4315  };
4316 
4317  // Emits the body of a multiversion function's resolver. Assumes that the
4318  // options are already sorted in the proper order, with the 'default' option
4319  // last (if it exists).
4320  void EmitMultiVersionResolver(llvm::Function *Resolver,
4321  ArrayRef<MultiVersionResolverOption> Options);
4322 
4323  static uint64_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
4324 
4325 private:
4326  QualType getVarArgType(const Expr *Arg);
4327 
4328  void EmitDeclMetadata();
4329 
4330  BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
4331  const AutoVarEmission &emission);
4332 
4333  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
4334 
4335  llvm::Value *GetValueForARMHint(unsigned BuiltinID);
4336  llvm::Value *EmitX86CpuIs(const CallExpr *E);
4337  llvm::Value *EmitX86CpuIs(StringRef CPUStr);
4338  llvm::Value *EmitX86CpuSupports(const CallExpr *E);
4339  llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
4340  llvm::Value *EmitX86CpuSupports(uint64_t Mask);
4341  llvm::Value *EmitX86CpuInit();
4342  llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
4343 };
4344 
4345 inline DominatingLLVMValue::saved_type
4346 DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
4347  if (!needsSaving(value)) return saved_type(value, false);
4348 
4349  // Otherwise, we need an alloca.
4350  auto align = CharUnits::fromQuantity(
4351  CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
4352  Address alloca =
4353  CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
4354  CGF.Builder.CreateStore(value, alloca);
4355 
4356  return saved_type(alloca.getPointer(), true);
4357 }
4358 
4359 inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
4360  saved_type value) {
4361  // If the value says it wasn't saved, trust that it's still dominating.
4362  if (!value.getInt()) return value.getPointer();
4363 
4364  // Otherwise, it should be an alloca instruction, as set up in save().
4365  auto alloca = cast<llvm::AllocaInst>(value.getPointer());
4366  return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
4367 }
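// [Editor's sketch, not part of the original header] save() and restore() are
// used as a pair when a value produced in conditionally-emitted code must be
// made available to a later cleanup, roughly:
//
//   DominatingLLVMValue::saved_type Saved =
//       DominatingLLVMValue::save(CGF, V);   // spills to an alloca if needed
//   ...
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);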
4368 
4369 } // end namespace CodeGen
4370 } // end namespace clang
4371 
4372 #endif