clang  9.0.0svn
CodeGenFunction.h
1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the internal per-function state used for llvm translation.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14 #define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15 
16 #include "CGBuilder.h"
17 #include "CGDebugInfo.h"
18 #include "CGLoopInfo.h"
19 #include "CGValue.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "EHScopeStack.h"
23 #include "VarBypassDetector.h"
24 #include "clang/AST/CharUnits.h"
25 #include "clang/AST/ExprCXX.h"
26 #include "clang/AST/ExprObjC.h"
27 #include "clang/AST/ExprOpenMP.h"
28 #include "clang/AST/Type.h"
29 #include "clang/Basic/ABI.h"
30 #include "clang/Basic/CapturedStmt.h"
31 #include "clang/Basic/CodeGenOptions.h"
32 #include "clang/Basic/OpenMPKinds.h"
33 #include "clang/Basic/TargetInfo.h"
34 #include "llvm/ADT/ArrayRef.h"
35 #include "llvm/ADT/DenseMap.h"
36 #include "llvm/ADT/MapVector.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/IR/ValueHandle.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Transforms/Utils/SanitizerStats.h"
41 
42 namespace llvm {
43 class BasicBlock;
44 class LLVMContext;
45 class MDNode;
46 class Module;
47 class SwitchInst;
48 class Twine;
49 class Value;
50 }
51 
52 namespace clang {
53 class ASTContext;
54 class BlockDecl;
55 class CXXDestructorDecl;
56 class CXXForRangeStmt;
57 class CXXTryStmt;
58 class Decl;
59 class LabelDecl;
60 class EnumConstantDecl;
61 class FunctionDecl;
62 class FunctionProtoType;
63 class LabelStmt;
64 class ObjCContainerDecl;
65 class ObjCInterfaceDecl;
66 class ObjCIvarDecl;
67 class ObjCMethodDecl;
68 class ObjCImplementationDecl;
69 class ObjCPropertyImplDecl;
70 class TargetInfo;
71 class VarDecl;
72 class ObjCForCollectionStmt;
73 class ObjCAtTryStmt;
74 class ObjCAtThrowStmt;
75 class ObjCAtSynchronizedStmt;
76 class ObjCAutoreleasePoolStmt;
77 
78 namespace analyze_os_log {
79 class OSLogBufferLayout;
80 }
81 
82 namespace CodeGen {
83 class CodeGenTypes;
84 class CGCallee;
85 class CGFunctionInfo;
86 class CGRecordLayout;
87 class CGBlockInfo;
88 class CGCXXABI;
89 class BlockByrefHelpers;
90 class BlockByrefInfo;
91 class BlockFlags;
92 class BlockFieldFlags;
93 class RegionCodeGenTy;
94 class TargetCodeGenInfo;
95 struct OMPTaskDataTy;
96 struct CGCoroData;
97 
98 /// The kind of evaluation to perform on values of a particular
99 /// type. Basically, is the code in CGExprScalar, CGExprComplex, or
100 /// CGExprAgg?
101 ///
102 /// TODO: should vectors maybe be split out into their own thing?
103 enum TypeEvaluationKind {
104  TEK_Scalar,
105  TEK_Complex,
106  TEK_Aggregate
107 };
108 
109 #define LIST_SANITIZER_CHECKS \
110  SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
111  SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
112  SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
113  SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
114  SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
115  SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
116  SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
117  SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
118  SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
119  SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
120  SANITIZER_CHECK(MissingReturn, missing_return, 0) \
121  SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
122  SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
123  SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
124  SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
125  SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
126  SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
127  SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
128  SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
129  SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
130  SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
131  SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
132  SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
133  SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
134 
135 enum SanitizerHandler {
136 #define SANITIZER_CHECK(Enum, Name, Version) Enum,
137  LIST_SANITIZER_CHECKS
138 #undef SANITIZER_CHECK
139 };
140 
141 /// Helper class with most of the code for saving a value for a
142 /// conditional expression cleanup.
143 struct DominatingLLVMValue {
144  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
145 
146  /// Answer whether the given value needs extra work to be saved.
147  static bool needsSaving(llvm::Value *value) {
148  // If it's not an instruction, we don't need to save.
149  if (!isa<llvm::Instruction>(value)) return false;
150 
151  // If it's an instruction in the entry block, we don't need to save.
152  llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
153  return (block != &block->getParent()->getEntryBlock());
154  }
155 
156  static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
157  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
158 };
159 
160 /// A partial specialization of DominatingValue for llvm::Values that
161 /// might be llvm::Instructions.
162 template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
163  typedef T *type;
164  static type restore(CodeGenFunction &CGF, saved_type value) {
165  return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
166  }
167 };
168 
169 /// A specialization of DominatingValue for Address.
170 template <> struct DominatingValue<Address> {
171  typedef Address type;
172 
173  struct saved_type {
174  DominatingLLVMValue::saved_type SavedValue;
175  CharUnits Alignment;
176  };
177 
178  static bool needsSaving(type value) {
179  return DominatingLLVMValue::needsSaving(value.getPointer());
180  }
181  static saved_type save(CodeGenFunction &CGF, type value) {
182  return { DominatingLLVMValue::save(CGF, value.getPointer()),
183  value.getAlignment() };
184  }
185  static type restore(CodeGenFunction &CGF, saved_type value) {
186  return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
187  value.Alignment);
188  }
189 };
190 
191 /// A specialization of DominatingValue for RValue.
192 template <> struct DominatingValue<RValue> {
193  typedef RValue type;
194  class saved_type {
195  enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
196  AggregateAddress, ComplexAddress };
197 
198  llvm::Value *Value;
199  unsigned K : 3;
200  unsigned Align : 29;
201  saved_type(llvm::Value *v, Kind k, unsigned a = 0)
202  : Value(v), K(k), Align(a) {}
203 
204  public:
205  static bool needsSaving(RValue value);
206  static saved_type save(CodeGenFunction &CGF, RValue value);
207  RValue restore(CodeGenFunction &CGF);
208 
209  // implementations in CGCleanup.cpp
210  };
211 
212  static bool needsSaving(type value) {
213  return saved_type::needsSaving(value);
214  }
215  static saved_type save(CodeGenFunction &CGF, type value) {
216  return saved_type::save(CGF, value);
217  }
218  static type restore(CodeGenFunction &CGF, saved_type value) {
219  return value.restore(CGF);
220  }
221 };
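// Usage sketch (a minimal illustration, assuming a CodeGenFunction &CGF and an
// RValue rv produced inside a conditionally-evaluated expression): a value that
// must survive until a conditional cleanup runs is stashed through
// DominatingValue and reloaded when the cleanup is actually emitted.
//
//   DominatingValue<RValue>::saved_type Saved =
//       DominatingValue<RValue>::save(CGF, rv);          // save now
//   ...
//   RValue Reloaded = DominatingValue<RValue>::restore(CGF, Saved);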
222 
223 /// CodeGenFunction - This class organizes the per-function state that is used
224 /// while generating LLVM code.
225 class CodeGenFunction : public CodeGenTypeCache {
226  CodeGenFunction(const CodeGenFunction &) = delete;
227  void operator=(const CodeGenFunction &) = delete;
228 
229  friend class CGCXXABI;
230 public:
231  /// A jump destination is an abstract label, branching to which may
232  /// require a jump out through normal cleanups.
233  struct JumpDest {
234  JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
235  JumpDest(llvm::BasicBlock *Block,
236  EHScopeStack::stable_iterator Depth,
237  unsigned Index)
238  : Block(Block), ScopeDepth(Depth), Index(Index) {}
239 
240  bool isValid() const { return Block != nullptr; }
241  llvm::BasicBlock *getBlock() const { return Block; }
242  EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
243  unsigned getDestIndex() const { return Index; }
244 
245  // This should be used cautiously.
246  void setScopeDepth(EHScopeStack::stable_iterator depth) {
247  ScopeDepth = depth;
248  }
249 
250  private:
251  llvm::BasicBlock *Block;
252  EHScopeStack::stable_iterator ScopeDepth;
253  unsigned Index;
254  };
255 
256  CodeGenModule &CGM; // Per-module state.
257  const TargetInfo &Target;
258 
259  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
260  LoopInfoStack LoopStack;
261  CGBuilderTy Builder;
262 
263  // Stores variables for which we can't generate correct lifetime markers
264  // because of jumps.
265  VarBypassDetector Bypasses;
266 
267  // CodeGen lambda for loops and support for ordered clause
268  typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
269  JumpDest)>
270  CodeGenLoopTy;
271  typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
272  const unsigned, const bool)>
273  CodeGenOrderedTy;
274 
275  // Codegen lambda for loop bounds in worksharing loop constructs
276  typedef llvm::function_ref<std::pair<LValue, LValue>(
277  CodeGenFunction &, const OMPExecutableDirective &S)>
278  CodeGenLoopBoundsTy;
279 
280  // Codegen lambda for loop bounds in dispatch-based loop implementation
281  typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
282  CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
283  Address UB)>
284  CodeGenDispatchBoundsTy;
285 
286  /// CGBuilder insert helper. This function is called after an
287  /// instruction is created using Builder.
288  void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
289  llvm::BasicBlock *BB,
290  llvm::BasicBlock::iterator InsertPt) const;
291 
292  /// CurFuncDecl - Holds the Decl for the current outermost
293  /// non-closure context.
294  const Decl *CurFuncDecl = nullptr;
295  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
296  const Decl *CurCodeDecl = nullptr;
297  const CGFunctionInfo *CurFnInfo = nullptr;
298  QualType FnRetTy;
299  llvm::Function *CurFn = nullptr;
300 
301  // Holds coroutine data if the current function is a coroutine. We use a
302  // wrapper to manage its lifetime, so that we don't have to define CGCoroData
303  // in this header.
304  struct CGCoroInfo {
305  std::unique_ptr<CGCoroData> Data;
306  CGCoroInfo();
307  ~CGCoroInfo();
308  };
309  CGCoroInfo CurCoro;
310 
311  bool isCoroutine() const {
312  return CurCoro.Data != nullptr;
313  }
314 
315  /// CurGD - The GlobalDecl for the current function being compiled.
316  GlobalDecl CurGD;
317 
318  /// PrologueCleanupDepth - The cleanup depth enclosing all the
319  /// cleanups associated with the parameters.
320  EHScopeStack::stable_iterator PrologueCleanupDepth;
321 
322  /// ReturnBlock - Unified return block.
323  JumpDest ReturnBlock;
324 
325  /// ReturnValue - The temporary alloca to hold the return
326  /// value. This is invalid iff the function has no return value.
327  Address ReturnValue = Address::invalid();
328 
329  /// Return true if a label was seen in the current scope.
330  bool hasLabelBeenSeenInCurrentScope() const {
331  if (CurLexicalScope)
332  return CurLexicalScope->hasLabels();
333  return !LabelMap.empty();
334  }
335 
336  /// AllocaInsertPoint - This is an instruction in the entry block before which
337  /// we prefer to insert allocas.
338  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
339 
340  /// API for captured statement code generation.
341  class CGCapturedStmtInfo {
342  public:
343  explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
344  : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
345  explicit CGCapturedStmtInfo(const CapturedStmt &S,
346  CapturedRegionKind K = CR_Default)
347  : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
348 
349  RecordDecl::field_iterator Field =
350  S.getCapturedRecordDecl()->field_begin();
351  for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
352  E = S.capture_end();
353  I != E; ++I, ++Field) {
354  if (I->capturesThis())
355  CXXThisFieldDecl = *Field;
356  else if (I->capturesVariable())
357  CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
358  else if (I->capturesVariableByCopy())
359  CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
360  }
361  }
362 
363  virtual ~CGCapturedStmtInfo();
364 
365  CapturedRegionKind getKind() const { return Kind; }
366 
367  virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
368  // Retrieve the value of the context parameter.
369  virtual llvm::Value *getContextValue() const { return ThisValue; }
370 
371  /// Lookup the captured field decl for a variable.
372  virtual const FieldDecl *lookup(const VarDecl *VD) const {
373  return CaptureFields.lookup(VD->getCanonicalDecl());
374  }
375 
376  bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
377  virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
378 
379  static bool classof(const CGCapturedStmtInfo *) {
380  return true;
381  }
382 
383  /// Emit the captured statement body.
384  virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
386  CGF.EmitStmt(S);
387  }
388 
389  /// Get the name of the capture helper.
390  virtual StringRef getHelperName() const { return "__captured_stmt"; }
391 
392  private:
393  /// The kind of captured statement being generated.
394  CapturedRegionKind Kind;
395 
396  /// Keep the map between VarDecl and FieldDecl.
397  llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
398 
399  /// The base address of the captured record, passed in as the first
400  /// argument of the parallel region function.
401  llvm::Value *ThisValue;
402 
403  /// Captured 'this' type.
404  FieldDecl *CXXThisFieldDecl;
405  };
406  CGCapturedStmtInfo *CapturedStmtInfo = nullptr;
407 
408  /// RAII for correct setting/restoring of CapturedStmtInfo.
409  class CGCapturedStmtRAII {
410  private:
411  CodeGenFunction &CGF;
412  CGCapturedStmtInfo *PrevCapturedStmtInfo;
413  public:
414  CGCapturedStmtRAII(CodeGenFunction &CGF,
415  CGCapturedStmtInfo *NewCapturedStmtInfo)
416  : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
417  CGF.CapturedStmtInfo = NewCapturedStmtInfo;
418  }
419  ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
420  };
421 
422  /// An abstract representation of regular/ObjC call/message targets.
423  class AbstractCallee {
424  /// The function declaration of the callee.
425  const Decl *CalleeDecl;
426 
427  public:
428  AbstractCallee() : CalleeDecl(nullptr) {}
429  AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
430  AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
431  bool hasFunctionDecl() const {
432  return dyn_cast_or_null<FunctionDecl>(CalleeDecl);
433  }
434  const Decl *getDecl() const { return CalleeDecl; }
435  unsigned getNumParams() const {
436  if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
437  return FD->getNumParams();
438  return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
439  }
440  const ParmVarDecl *getParamDecl(unsigned I) const {
441  if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
442  return FD->getParamDecl(I);
443  return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
444  }
445  };
446 
447  /// Sanitizers enabled for this function.
448  SanitizerSet SanOpts;
449 
450  /// True if CodeGen currently emits code implementing sanitizer checks.
451  bool IsSanitizerScope = false;
452 
453  /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
454  class SanitizerScope {
455  CodeGenFunction *CGF;
456  public:
457  SanitizerScope(CodeGenFunction *CGF);
458  ~SanitizerScope();
459  };
460 
461  /// In C++, whether we are code generating a thunk. This controls whether we
462  /// should emit cleanups.
463  bool CurFuncIsThunk = false;
464 
465  /// In ARC, whether we should autorelease the return value.
466  bool AutoreleaseResult = false;
467 
468  /// Whether we processed a Microsoft-style asm block during CodeGen. These can
469  /// potentially set the return value.
470  bool SawAsmBlock = false;
471 
472  const NamedDecl *CurSEHParent = nullptr;
473 
474  /// True if the current function is an outlined SEH helper. This can be a
475  /// finally block or filter expression.
476  bool IsOutlinedSEHHelper = false;
477 
478  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
479  llvm::Value *BlockPointer = nullptr;
480 
481  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
482  FieldDecl *LambdaThisCaptureField = nullptr;
483 
484  /// A mapping from NRVO variables to the flags used to indicate
485  /// when the NRVO has been applied to this variable.
486  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
487 
487 
488  EHScopeStack EHStack;
489  llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
490  llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
491 
492  llvm::Instruction *CurrentFuncletPad = nullptr;
493 
494  class CallLifetimeEnd final : public EHScopeStack::Cleanup {
495  llvm::Value *Addr;
496  llvm::Value *Size;
497 
498  public:
499  CallLifetimeEnd(Address addr, llvm::Value *size)
500  : Addr(addr.getPointer()), Size(size) {}
501 
502  void Emit(CodeGenFunction &CGF, Flags flags) override {
503  CGF.EmitLifetimeEnd(Size, Addr);
504  }
505  };
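  // Usage sketch (assuming an Address Addr for a local temporary and an
  // llvm::Value *SizeV holding its size): pushing this cleanup arranges for
  // llvm.lifetime.end to be emitted when the enclosing scope is popped.
  //
  //   CGF.EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
  //                                            Addr, SizeV);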
506 
507  /// Header for data within LifetimeExtendedCleanupStack.
508  struct LifetimeExtendedCleanupHeader {
509  /// The size of the following cleanup object.
510  unsigned Size;
511  /// The kind of cleanup to push: a value from the CleanupKind enumeration.
512  unsigned Kind : 31;
513  /// Whether this is a conditional cleanup.
514  unsigned IsConditional : 1;
515 
516  size_t getSize() const { return Size; }
517  CleanupKind getKind() const { return (CleanupKind)Kind; }
518  bool isConditional() const { return IsConditional; }
519  };
520 
521  /// i32s containing the indexes of the cleanup destinations.
522  Address NormalCleanupDest = Address::invalid();
523 
524  unsigned NextCleanupDestIndex = 1;
525 
526  /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
527  CGBlockInfo *FirstBlockInfo = nullptr;
528 
529  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
530  llvm::BasicBlock *EHResumeBlock = nullptr;
531 
532  /// The exception slot. All landing pads write the current exception pointer
533  /// into this alloca.
534  llvm::Value *ExceptionSlot = nullptr;
535 
536  /// The selector slot. Under the MandatoryCleanup model, all landing pads
537  /// write the current selector value into this alloca.
538  llvm::AllocaInst *EHSelectorSlot = nullptr;
539 
540  /// A stack of exception code slots. Entering an __except block pushes a slot
541  /// on the stack and leaving pops one. The __exception_code() intrinsic loads
542  /// a value from the top of the stack.
543  SmallVector<Address, 4> SEHCodeSlotStack;
544 
545  /// Value returned by __exception_info intrinsic.
546  llvm::Value *SEHInfo = nullptr;
547 
548  /// Emits a landing pad for the current EH stack.
549  llvm::BasicBlock *EmitLandingPad();
550 
551  llvm::BasicBlock *getInvokeDestImpl();
552 
553  template <class T>
554  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
555  return DominatingValue<T>::save(*this, value);
556  }
557 
558 public:
559  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
560  /// rethrows.
561  llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
562 
563  /// A class controlling the emission of a finally block.
564  class FinallyInfo {
565  /// Where the catchall's edge through the cleanup should go.
566  JumpDest RethrowDest;
567 
568  /// A function to call to enter the catch.
569  llvm::FunctionCallee BeginCatchFn;
570 
571  /// An i1 variable indicating whether or not the @finally is
572  /// running for an exception.
573  llvm::AllocaInst *ForEHVar;
574 
575  /// An i8* variable into which the exception pointer to rethrow
576  /// has been saved.
577  llvm::AllocaInst *SavedExnVar;
578 
579  public:
580  void enter(CodeGenFunction &CGF, const Stmt *Finally,
581  llvm::FunctionCallee beginCatchFn,
582  llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
583  void exit(CodeGenFunction &CGF);
584  };
585 
586  /// Returns true inside SEH __try blocks.
587  bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
588 
589  /// Returns true while emitting a cleanuppad.
590  bool isCleanupPadScope() const {
591  return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
592  }
593 
594  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
595  /// current full-expression. Safe against the possibility that
596  /// we're currently inside a conditionally-evaluated expression.
597  template <class T, class... As>
598  void pushFullExprCleanup(CleanupKind kind, As... A) {
599  // If we're not in a conditional branch, or if none of the
600  // arguments requires saving, then use the unconditional cleanup.
601  if (!isInConditionalBranch())
602  return EHStack.pushCleanup<T>(kind, A...);
603 
604  // Stash values in a tuple so we can guarantee the order of saves.
605  typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
606  SavedTuple Saved{saveValueInCond(A)...};
607 
608  typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
609  EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
610  initFullExprCleanup();
611  }
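  // Usage sketch (FreeTemporary is a hypothetical EHScopeStack::Cleanup taking
  // a single Address, named here only for illustration): the cleanup is emitted
  // at the end of the current full-expression, and if this is called inside a
  // conditional branch the Address argument is first saved via
  // DominatingValue<Address> and restored when the cleanup actually runs.
  //
  //   CGF.pushFullExprCleanup<FreeTemporary>(NormalAndEHCleanup, TempAddr);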
612 
613  /// Queue a cleanup to be pushed after finishing the current
614  /// full-expression.
615  template <class T, class... As>
616  void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
617  if (!isInConditionalBranch())
618  return pushCleanupAfterFullExprImpl<T>(Kind, Address::invalid(), A...);
619 
620  Address ActiveFlag = createCleanupActiveFlag();
621  assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
622  "cleanup active flag should never need saving");
623 
624  typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
625  SavedTuple Saved{saveValueInCond(A)...};
626 
627  typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
628  pushCleanupAfterFullExprImpl<CleanupType>(Kind, ActiveFlag, Saved);
629  }
630 
631  template <class T, class... As>
632  void pushCleanupAfterFullExprImpl(CleanupKind Kind, Address ActiveFlag,
633  As... A) {
634  LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
635  ActiveFlag.isValid()};
636 
637  size_t OldSize = LifetimeExtendedCleanupStack.size();
638  LifetimeExtendedCleanupStack.resize(
639  LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
640  (Header.IsConditional ? sizeof(ActiveFlag) : 0));
641 
642  static_assert(sizeof(Header) % alignof(T) == 0,
643  "Cleanup will be allocated on misaligned address");
644  char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
645  new (Buffer) LifetimeExtendedCleanupHeader(Header);
646  new (Buffer + sizeof(Header)) T(A...);
647  if (Header.IsConditional)
648  new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag);
649  }
650 
651  /// Set up the last cleanup that was pushed as a conditional
652  /// full-expression cleanup.
653  void initFullExprCleanup() {
654  initFullExprCleanupWithFlag(createCleanupActiveFlag());
655  }
656 
657  void initFullExprCleanupWithFlag(Address ActiveFlag);
658  Address createCleanupActiveFlag();
659 
660  /// PushDestructorCleanup - Push a cleanup to call the
661  /// complete-object destructor of an object of the given type at the
662  /// given address. Does nothing if T is not a C++ class type with a
663  /// non-trivial destructor.
664  void PushDestructorCleanup(QualType T, Address Addr);
665 
666  /// PushDestructorCleanup - Push a cleanup to call the
667  /// complete-object variant of the given destructor on the object at
668  /// the given address.
669  void PushDestructorCleanup(const CXXDestructorDecl *Dtor, Address Addr);
670 
671  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
672  /// process all branch fixups.
673  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
674 
675  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
676  /// The block cannot be reactivated. Pops it if it's the top of the
677  /// stack.
678  ///
679  /// \param DominatingIP - An instruction which is known to
680  /// dominate the current IP (if set) and which lies along
681  /// all paths of execution between the current IP and the
682  /// point at which the cleanup comes into scope.
683  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
684  llvm::Instruction *DominatingIP);
685 
686  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
687  /// Cannot be used to resurrect a deactivated cleanup.
688  ///
689  /// \param DominatingIP - An instruction which is known to
690  /// dominate the current IP (if set) and which lies along
691  /// all paths of execution between the current IP and the
692  /// point at which the cleanup comes into scope.
693  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
694  llvm::Instruction *DominatingIP);
695 
696  /// Enters a new scope for capturing cleanups, all of which
697  /// will be executed once the scope is exited.
698  class RunCleanupsScope {
699  EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
700  size_t LifetimeExtendedCleanupStackSize;
701  bool OldDidCallStackSave;
702  protected:
703  bool PerformCleanup;
704  private:
705 
706  RunCleanupsScope(const RunCleanupsScope &) = delete;
707  void operator=(const RunCleanupsScope &) = delete;
708 
709  protected:
710  CodeGenFunction& CGF;
711 
712  public:
713  /// Enter a new cleanup scope.
714  explicit RunCleanupsScope(CodeGenFunction &CGF)
715  : PerformCleanup(true), CGF(CGF)
716  {
717  CleanupStackDepth = CGF.EHStack.stable_begin();
718  LifetimeExtendedCleanupStackSize =
719  CGF.LifetimeExtendedCleanupStack.size();
720  OldDidCallStackSave = CGF.DidCallStackSave;
721  CGF.DidCallStackSave = false;
722  OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
723  CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
724  }
725 
726  /// Exit this cleanup scope, emitting any accumulated cleanups.
727  ~RunCleanupsScope() {
728  if (PerformCleanup)
729  ForceCleanup();
730  }
731 
732  /// Determine whether this scope requires any cleanups.
733  bool requiresCleanups() const {
734  return CGF.EHStack.stable_begin() != CleanupStackDepth;
735  }
736 
737  /// Force the emission of cleanups now, instead of waiting
738  /// until this object is destroyed.
739  /// \param ValuesToReload - A list of values that need to be available at
740  /// the insertion point after cleanup emission. If cleanup emission created
741  /// a shared cleanup block, these value pointers will be rewritten.
742  /// Otherwise, they will not be modified.
743  void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
744  assert(PerformCleanup && "Already forced cleanup");
745  CGF.DidCallStackSave = OldDidCallStackSave;
746  CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
747  ValuesToReload);
748  PerformCleanup = false;
749  CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
750  }
751  };
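  // Usage sketch (assuming a CodeGenFunction &CGF): cleanups pushed while the
  // scope is alive are emitted when the scope object is destroyed (or when
  // ForceCleanup() is called explicitly).
  //
  //   {
  //     CodeGenFunction::RunCleanupsScope Scope(CGF);
  //     // ... emit statements that may push cleanups onto CGF.EHStack ...
  //   }   // accumulated cleanups are emitted here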
752 
753  // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
754  EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
755  EHScopeStack::stable_end();
756 
757  class LexicalScope : public RunCleanupsScope {
758  SourceRange Range;
759  SmallVector<const LabelDecl*, 4> Labels;
760  LexicalScope *ParentScope;
761 
762  LexicalScope(const LexicalScope &) = delete;
763  void operator=(const LexicalScope &) = delete;
764 
765  public:
766  /// Enter a new cleanup scope.
767  explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
768  : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
769  CGF.CurLexicalScope = this;
770  if (CGDebugInfo *DI = CGF.getDebugInfo())
771  DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
772  }
773 
774  void addLabel(const LabelDecl *label) {
775  assert(PerformCleanup && "adding label to dead scope?");
776  Labels.push_back(label);
777  }
778 
779  /// Exit this cleanup scope, emitting any accumulated
780  /// cleanups.
781  ~LexicalScope() {
782  if (CGDebugInfo *DI = CGF.getDebugInfo())
783  DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
784 
785  // If we should perform a cleanup, force them now. Note that
786  // this ends the cleanup scope before rescoping any labels.
787  if (PerformCleanup) {
788  ApplyDebugLocation DL(CGF, Range.getEnd());
789  ForceCleanup();
790  }
791  }
792 
793  /// Force the emission of cleanups now, instead of waiting
794  /// until this object is destroyed.
795  void ForceCleanup() {
796  CGF.CurLexicalScope = ParentScope;
797  RunCleanupsScope::ForceCleanup();
798 
799  if (!Labels.empty())
800  rescopeLabels();
801  }
802 
803  bool hasLabels() const {
804  return !Labels.empty();
805  }
806 
807  void rescopeLabels();
808  };
809 
810  typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
811 
812  /// The class used to assign some variables temporary addresses.
813  class OMPMapVars {
814  DeclMapTy SavedLocals;
815  DeclMapTy SavedTempAddresses;
816  OMPMapVars(const OMPMapVars &) = delete;
817  void operator=(const OMPMapVars &) = delete;
818 
819  public:
820  explicit OMPMapVars() = default;
821  ~OMPMapVars() {
822  assert(SavedLocals.empty() && "Did not restore original addresses.");
823  };
824 
825  /// Sets the address of the variable \p LocalVD to be \p TempAddr in
826  /// function \p CGF.
827  /// \return true if the address was set, false if the variable was already mapped.
828  bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
829  Address TempAddr) {
830  LocalVD = LocalVD->getCanonicalDecl();
831  // Only save it once.
832  if (SavedLocals.count(LocalVD)) return false;
833 
834  // Copy the existing local entry to SavedLocals.
835  auto it = CGF.LocalDeclMap.find(LocalVD);
836  if (it != CGF.LocalDeclMap.end())
837  SavedLocals.try_emplace(LocalVD, it->second);
838  else
839  SavedLocals.try_emplace(LocalVD, Address::invalid());
840 
841  // Generate the private entry.
842  QualType VarTy = LocalVD->getType();
843  if (VarTy->isReferenceType()) {
844  Address Temp = CGF.CreateMemTemp(VarTy);
845  CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
846  TempAddr = Temp;
847  }
848  SavedTempAddresses.try_emplace(LocalVD, TempAddr);
849 
850  return true;
851  }
852 
853  /// Applies new addresses to the list of the variables.
854  /// \return true if at least one variable is using a new address, false
855  /// otherwise.
856  bool apply(CodeGenFunction &CGF) {
857  copyInto(SavedTempAddresses, CGF.LocalDeclMap);
858  SavedTempAddresses.clear();
859  return !SavedLocals.empty();
860  }
861 
862  /// Restores original addresses of the variables.
863  void restore(CodeGenFunction &CGF) {
864  if (!SavedLocals.empty()) {
865  copyInto(SavedLocals, CGF.LocalDeclMap);
866  SavedLocals.clear();
867  }
868  }
869 
870  private:
871  /// Copy all the entries in the source map over the corresponding
872  /// entries in the destination, which must exist.
873  static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
874  for (auto &Pair : Src) {
875  if (!Pair.second.isValid()) {
876  Dest.erase(Pair.first);
877  continue;
878  }
879 
880  auto I = Dest.find(Pair.first);
881  if (I != Dest.end())
882  I->second = Pair.second;
883  else
884  Dest.insert(Pair);
885  }
886  }
887  };
888 
889  /// The scope used to remap some variables as private in the OpenMP loop body
890  /// (or other captured region emitted without outlining), and to restore old
891  /// vars back on exit.
892  class OMPPrivateScope : public RunCleanupsScope {
893  OMPMapVars MappedVars;
894  OMPPrivateScope(const OMPPrivateScope &) = delete;
895  void operator=(const OMPPrivateScope &) = delete;
896 
897  public:
898  /// Enter a new OpenMP private scope.
899  explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
900 
901  /// Registers \p LocalVD variable as a private and apply \p PrivateGen
902  /// function for it to generate corresponding private variable. \p
903  /// PrivateGen returns an address of the generated private variable.
904  /// \return true if the variable is registered as private, false if it has
905  /// been privatized already.
906  bool addPrivate(const VarDecl *LocalVD,
907  const llvm::function_ref<Address()> PrivateGen) {
908  assert(PerformCleanup && "adding private to dead scope");
909  return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
910  }
911 
912  /// Privatizes local variables previously registered as private.
913  /// Registration is separate from the actual privatization to allow
914  /// initializers to use values of the original variables, not the private ones.
915  /// This is important, for example, if the private variable is a class
916  /// variable initialized by a constructor that references other private
917  /// variables. But at initialization original variables must be used, not
918  /// private copies.
919  /// \return true if at least one variable was privatized, false otherwise.
920  bool Privatize() { return MappedVars.apply(CGF); }
921 
922  void ForceCleanup() {
923  RunCleanupsScope::ForceCleanup();
924  MappedVars.restore(CGF);
925  }
926 
927  /// Exit scope - all the mapped variables are restored.
928  ~OMPPrivateScope() {
929  if (PerformCleanup)
930  ForceCleanup();
931  }
932 
933  /// Checks if the global variable is captured in the current function.
934  bool isGlobalVarCaptured(const VarDecl *VD) const {
935  VD = VD->getCanonicalDecl();
936  return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
937  }
938  };
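  // Usage sketch (assuming a VarDecl *VD captured by an OpenMP construct):
  // register a private copy, then flip the mapping in one step.
  //
  //   CodeGenFunction::OMPPrivateScope PrivScope(CGF);
  //   PrivScope.addPrivate(VD, [&CGF, VD]() -> Address {
  //     return CGF.CreateMemTemp(VD->getType(), VD->getName());
  //   });
  //   (void)PrivScope.Privatize();   // remap VD to the private copy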
939 
940  /// Takes the old cleanup stack size and emits the cleanup blocks
941  /// that have been added.
942  void
943  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
944  std::initializer_list<llvm::Value **> ValuesToReload = {});
945 
946  /// Takes the old cleanup stack size and emits the cleanup blocks
947  /// that have been added, then adds all lifetime-extended cleanups from
948  /// the given position to the stack.
949  void
950  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
951  size_t OldLifetimeExtendedStackSize,
952  std::initializer_list<llvm::Value **> ValuesToReload = {});
953 
954  void ResolveBranchFixups(llvm::BasicBlock *Target);
955 
956  /// The given basic block lies in the current EH scope, but may be a
957  /// target of a potentially scope-crossing jump; get a stable handle
958  /// to which we can perform this jump later.
959  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
960  return JumpDest(Target,
961  EHStack.getInnermostNormalCleanup(),
962  NextCleanupDestIndex++);
963  }
964 
965  /// The given basic block lies in the current EH scope, but may be a
966  /// target of a potentially scope-crossing jump; get a stable handle
967  /// to which we can perform this jump later.
968  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
969  return getJumpDestInCurrentScope(createBasicBlock(Name));
970  }
971 
972  /// EmitBranchThroughCleanup - Emit a branch from the current insert
973  /// block through the normal cleanup handling code (if any) and then
974  /// on to \arg Dest.
975  void EmitBranchThroughCleanup(JumpDest Dest);
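  // Usage sketch (assuming a loop statement is being emitted): break/continue
  // targets are modeled as JumpDests so that branches to them run any
  // intervening cleanups.
  //
  //   JumpDest LoopExit = CGF.getJumpDestInCurrentScope("while.end");
  //   JumpDest LoopCond = CGF.getJumpDestInCurrentScope("while.cond");
  //   ...
  //   CGF.EmitBranchThroughCleanup(LoopExit);   // e.g. for a 'break'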
976 
977  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
978  /// specified destination obviously has no cleanups to run. 'false' is always
979  /// a conservatively correct answer for this method.
980  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
981 
982  /// popCatchScope - Pops the catch scope at the top of the EHScope
983  /// stack, emitting any required code (other than the catch handlers
984  /// themselves).
985  void popCatchScope();
986 
987  llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
988  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
989  llvm::BasicBlock *
990  getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
991 
992  /// An object to manage conditionally-evaluated expressions.
993  class ConditionalEvaluation {
994  llvm::BasicBlock *StartBB;
995 
996  public:
997  ConditionalEvaluation(CodeGenFunction &CGF)
998  : StartBB(CGF.Builder.GetInsertBlock()) {}
999 
1000  void begin(CodeGenFunction &CGF) {
1001  assert(CGF.OutermostConditional != this);
1002  if (!CGF.OutermostConditional)
1003  CGF.OutermostConditional = this;
1004  }
1005 
1006  void end(CodeGenFunction &CGF) {
1007  assert(CGF.OutermostConditional != nullptr);
1008  if (CGF.OutermostConditional == this)
1009  CGF.OutermostConditional = nullptr;
1010  }
1011 
1012  /// Returns a block which will be executed prior to each
1013  /// evaluation of the conditional code.
1014  llvm::BasicBlock *getStartingBlock() const {
1015  return StartBB;
1016  }
1017  };
1018 
1019  /// isInConditionalBranch - Return true if we're currently emitting
1020  /// one branch or the other of a conditional expression.
1021  bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1022 
1023  void setBeforeOutermostConditional(llvm::Value *value, Address addr) {
1024  assert(isInConditionalBranch());
1025  llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
1026  auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
1027  store->setAlignment(addr.getAlignment().getQuantity());
1028  }
1029 
1030  /// An RAII object to record that we're evaluating a statement
1031  /// expression.
1032  class StmtExprEvaluation {
1033  CodeGenFunction &CGF;
1034 
1035  /// We have to save the outermost conditional: cleanups in a
1036  /// statement expression aren't conditional just because the
1037  /// StmtExpr is.
1038  ConditionalEvaluation *SavedOutermostConditional;
1039 
1040  public:
1041  StmtExprEvaluation(CodeGenFunction &CGF)
1042  : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1043  CGF.OutermostConditional = nullptr;
1044  }
1045 
1046  ~StmtExprEvaluation() {
1047  CGF.OutermostConditional = SavedOutermostConditional;
1048  CGF.EnsureInsertPoint();
1049  }
1050  };
1051 
1052  /// An object which temporarily prevents a value from being
1053  /// destroyed by aggressive peephole optimizations that assume that
1054  /// all uses of a value have been realized in the IR.
1055  class PeepholeProtection {
1056  llvm::Instruction *Inst;
1057  friend class CodeGenFunction;
1058 
1059  public:
1060  PeepholeProtection() : Inst(nullptr) {}
1061  };
1062 
1063  /// A non-RAII class containing all the information about a bound
1064  /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1065  /// this which makes individual mappings very simple; using this
1066  /// class directly is useful when you have a variable number of
1067  /// opaque values or don't want the RAII functionality for some
1068  /// reason.
1069  class OpaqueValueMappingData {
1070  const OpaqueValueExpr *OpaqueValue;
1071  bool BoundLValue;
1072  CodeGenFunction::PeepholeProtection Protection;
1073 
1074  OpaqueValueMappingData(const OpaqueValueExpr *ov,
1075  bool boundLValue)
1076  : OpaqueValue(ov), BoundLValue(boundLValue) {}
1077  public:
1078  OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1079 
1080  static bool shouldBindAsLValue(const Expr *expr) {
1081  // gl-values should be bound as l-values for obvious reasons.
1082  // Records should be bound as l-values because IR generation
1083  // always keeps them in memory. Expressions of function type
1084  // act exactly like l-values but are formally required to be
1085  // r-values in C.
1086  return expr->isGLValue() ||
1087  expr->getType()->isFunctionType() ||
1088  hasAggregateEvaluationKind(expr->getType());
1089  }
1090 
1091  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1092  const OpaqueValueExpr *ov,
1093  const Expr *e) {
1094  if (shouldBindAsLValue(ov))
1095  return bind(CGF, ov, CGF.EmitLValue(e));
1096  return bind(CGF, ov, CGF.EmitAnyExpr(e));
1097  }
1098 
1099  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1100  const OpaqueValueExpr *ov,
1101  const LValue &lv) {
1102  assert(shouldBindAsLValue(ov));
1103  CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1104  return OpaqueValueMappingData(ov, true);
1105  }
1106 
1107  static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1108  const OpaqueValueExpr *ov,
1109  const RValue &rv) {
1110  assert(!shouldBindAsLValue(ov));
1111  CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1112 
1113  OpaqueValueMappingData data(ov, false);
1114 
1115  // Work around an extremely aggressive peephole optimization in
1116  // EmitScalarConversion which assumes that all other uses of a
1117  // value are extant.
1118  data.Protection = CGF.protectFromPeepholes(rv);
1119 
1120  return data;
1121  }
1122 
1123  bool isValid() const { return OpaqueValue != nullptr; }
1124  void clear() { OpaqueValue = nullptr; }
1125 
1126  void unbind(CodeGenFunction &CGF) {
1127  assert(OpaqueValue && "no data to unbind!");
1128 
1129  if (BoundLValue) {
1130  CGF.OpaqueLValues.erase(OpaqueValue);
1131  } else {
1132  CGF.OpaqueRValues.erase(OpaqueValue);
1133  CGF.unprotectFromPeepholes(Protection);
1134  }
1135  }
1136  };
1137 
1138  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1139  class OpaqueValueMapping {
1140  CodeGenFunction &CGF;
1141  OpaqueValueMappingData Data;
1142 
1143  public:
1144  static bool shouldBindAsLValue(const Expr *expr) {
1145  return OpaqueValueMappingData::shouldBindAsLValue(expr);
1146  }
1147 
1148  /// Build the opaque value mapping for the given conditional
1149  /// operator if it's the GNU ?: extension. This is a common
1150  /// enough pattern that the convenience operator is really
1151  /// helpful.
1152  ///
1153  OpaqueValueMapping(CodeGenFunction &CGF,
1154  const AbstractConditionalOperator *op) : CGF(CGF) {
1155  if (isa<ConditionalOperator>(op))
1156  // Leave Data empty.
1157  return;
1158 
1159  const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1160  Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1161  e->getCommon());
1162  }
1163 
1164  /// Build the opaque value mapping for an OpaqueValueExpr whose source
1165  /// expression is set to the expression the OVE represents.
1166  OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
1167  : CGF(CGF) {
1168  if (OV) {
1169  assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1170  "for OVE with no source expression");
1171  Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
1172  }
1173  }
1174 
1175  OpaqueValueMapping(CodeGenFunction &CGF,
1176  const OpaqueValueExpr *opaqueValue,
1177  LValue lvalue)
1178  : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1179  }
1180 
1181  OpaqueValueMapping(CodeGenFunction &CGF,
1182  const OpaqueValueExpr *opaqueValue,
1183  RValue rvalue)
1184  : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1185  }
1186 
1187  void pop() {
1188  Data.unbind(CGF);
1189  Data.clear();
1190  }
1191 
1192  ~OpaqueValueMapping() {
1193  if (Data.isValid()) Data.unbind(CGF);
1194  }
1195  };
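  // Usage sketch (assuming a BinaryConditionalOperator *E, i.e. the GNU ?:
  // extension): binding the opaque value lets both the condition and the
  // true-result reuse the already-emitted common subexpression.
  //
  //   CodeGenFunction::OpaqueValueMapping Binding(CGF, E);
  //   llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());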
1196 
1197 private:
1198  CGDebugInfo *DebugInfo;
1199  /// Used to create unique names for artificial VLA size debug info variables.
1200  unsigned VLAExprCounter = 0;
1201  bool DisableDebugInfo = false;
1202 
1203  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1204  /// calling llvm.stacksave for multiple VLAs in the same scope.
1205  bool DidCallStackSave = false;
1206 
1207  /// IndirectBranch - The first time an indirect goto is seen we create a block
1208  /// with an indirect branch. Every time we see the address of a label taken,
1209  /// we add the label to the indirect goto. Every subsequent indirect goto is
1210  /// codegen'd as a jump to the IndirectBranch's basic block.
1211  llvm::IndirectBrInst *IndirectBranch = nullptr;
1212 
1213  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1214  /// decls.
1215  DeclMapTy LocalDeclMap;
1216 
1217  // Keep track of the cleanups for callee-destructed parameters pushed to the
1218  // cleanup stack so that they can be deactivated later.
1219  llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1220  CalleeDestructedParamCleanups;
1221 
1222  /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1223  /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1224  /// parameter.
1225  llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1226  SizeArguments;
1227 
1228  /// Track escaped local variables with auto storage. Used during SEH
1229  /// outlining to produce a call to llvm.localescape.
1230  llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1231 
1232  /// LabelMap - This keeps track of the LLVM basic block for each C label.
1233  llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1234 
1235  // BreakContinueStack - This keeps track of where break and continue
1236  // statements should jump to.
1237  struct BreakContinue {
1238  BreakContinue(JumpDest Break, JumpDest Continue)
1239  : BreakBlock(Break), ContinueBlock(Continue) {}
1240 
1241  JumpDest BreakBlock;
1242  JumpDest ContinueBlock;
1243  };
1244  SmallVector<BreakContinue, 8> BreakContinueStack;
1245 
1246  /// Handles cancellation exit points in OpenMP-related constructs.
1247  class OpenMPCancelExitStack {
1248  /// Tracks cancellation exit point and join point for cancel-related exit
1249  /// and normal exit.
1250  struct CancelExit {
1251  CancelExit() = default;
1252  CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1253  JumpDest ContBlock)
1254  : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1255  OpenMPDirectiveKind Kind = OMPD_unknown;
1256  /// true if the exit block has been emitted already by the special
1257  /// emitExit() call, false if the default codegen is used.
1258  bool HasBeenEmitted = false;
1259  JumpDest ExitBlock;
1260  JumpDest ContBlock;
1261  };
1262 
1263  SmallVector<CancelExit, 8> Stack;
1264 
1265  public:
1266  OpenMPCancelExitStack() : Stack(1) {}
1267  ~OpenMPCancelExitStack() = default;
1268  /// Fetches the exit block for the current OpenMP construct.
1269  JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1270  /// Emits exit block with special codegen procedure specific for the related
1271  /// OpenMP construct + emits code for normal construct cleanup.
1272  void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1273  const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1274  if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1275  assert(CGF.getOMPCancelDestination(Kind).isValid());
1276  assert(CGF.HaveInsertPoint());
1277  assert(!Stack.back().HasBeenEmitted);
1278  auto IP = CGF.Builder.saveAndClearIP();
1279  CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1280  CodeGen(CGF);
1281  CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1282  CGF.Builder.restoreIP(IP);
1283  Stack.back().HasBeenEmitted = true;
1284  }
1285  CodeGen(CGF);
1286  }
1287  /// Enter the cancel supporting \a Kind construct.
1288  /// \param Kind OpenMP directive that supports cancel constructs.
1289  /// \param HasCancel true, if the construct has inner cancel directive,
1290  /// false otherwise.
1291  void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
1292  Stack.push_back({Kind,
1293  HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
1294  : JumpDest(),
1295  HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
1296  : JumpDest()});
1297  }
1298  /// Emits default exit point for the cancel construct (if the special one
1299  /// has not been used) + join point for cancel/normal exits.
1300  void exit(CodeGenFunction &CGF) {
1301  if (getExitBlock().isValid()) {
1302  assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
1303  bool HaveIP = CGF.HaveInsertPoint();
1304  if (!Stack.back().HasBeenEmitted) {
1305  if (HaveIP)
1306  CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1307  CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1308  CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1309  }
1310  CGF.EmitBlock(Stack.back().ContBlock.getBlock());
1311  if (!HaveIP) {
1312  CGF.Builder.CreateUnreachable();
1313  CGF.Builder.ClearInsertionPoint();
1314  }
1315  }
1316  Stack.pop_back();
1317  }
1318  };
1319  OpenMPCancelExitStack OMPCancelStack;
1320 
1321  CodeGenPGO PGO;
1322 
1323  /// Calculate branch weights appropriate for PGO data
1324  llvm::MDNode *createProfileWeights(uint64_t TrueCount, uint64_t FalseCount);
1325  llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights);
1326  llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1327  uint64_t LoopCount);
1328 
1329 public:
1330  /// Increment the profiler's counter for the given statement by \p StepV.
1331  /// If \p StepV is null, the default increment is 1.
1332  void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1333  if (CGM.getCodeGenOpts().hasProfileClangInstr())
1334  PGO.emitCounterIncrement(Builder, S, StepV);
1335  PGO.setCurrentStmt(S);
1336  }
1337 
1338  /// Get the profiler's count for the given statement.
1339  uint64_t getProfileCount(const Stmt *S) {
1340  Optional<uint64_t> Count = PGO.getStmtCount(S);
1341  if (!Count.hasValue())
1342  return 0;
1343  return *Count;
1344  }
1345 
1346  /// Set the profiler's current count.
1347  void setCurrentProfileCount(uint64_t Count) {
1348  PGO.setCurrentRegionCount(Count);
1349  }
1350 
1351  /// Get the profiler's current count. This is generally the count for the most
1352  /// recently incremented counter.
1353  uint64_t getCurrentProfileCount() {
1354  return PGO.getCurrentRegionCount();
1355  }
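  // Usage sketch (inside a CodeGenFunction member that emits an if-statement,
  // with uint64_t ParentCount captured before the branch): the counter is
  // bumped on the taken path and the counts feed branch-weight metadata.
  //
  //   incrementProfileCounter(S);                  // S is the 'then' statement
  //   uint64_t ThenCount = getProfileCount(S);
  //   llvm::MDNode *Weights =
  //       createProfileWeights(ThenCount, ParentCount - ThenCount);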
1356 
1357 private:
1358 
1359  /// SwitchInsn - This is the nearest enclosing switch instruction. It is
1360  /// null if the current context is not in a switch.
1361  llvm::SwitchInst *SwitchInsn = nullptr;
1362  /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1363  SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1364 
1365  /// CaseRangeBlock - This block holds the if-condition check for the last
1366  /// case statement range in the current switch instruction.
1367  llvm::BasicBlock *CaseRangeBlock = nullptr;
1368 
1369  /// OpaqueLValues - Keeps track of the current set of opaque value
1370  /// expressions.
1371  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1372  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1373 
1374  // VLASizeMap - This keeps track of the associated size for each VLA type.
1375  // We track this by the size expression rather than the type itself because
1376  // in certain situations, like a const qualifier applied to a VLA typedef,
1377  // multiple VLA types can share the same size expression.
1378  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1379  // enter/leave scopes.
1380  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1381 
1382  /// A block containing a single 'unreachable' instruction. Created
1383  /// lazily by getUnreachableBlock().
1384  llvm::BasicBlock *UnreachableBlock = nullptr;
1385 
1386  /// Count of the number of return expressions in the function.
1387  unsigned NumReturnExprs = 0;
1388 
1389  /// Count the number of simple (constant) return expressions in the function.
1390  unsigned NumSimpleReturnExprs = 0;
1391 
1392  /// The last regular (non-return) debug location (breakpoint) in the function.
1393  SourceLocation LastStopPoint;
1394 
1395 public:
1396  /// A scope within which we are constructing the fields of an object which
1397  /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1398  /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1399  class FieldConstructionScope {
1400  public:
1401  FieldConstructionScope(CodeGenFunction &CGF, Address This)
1402  : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1403  CGF.CXXDefaultInitExprThis = This;
1404  }
1405  ~FieldConstructionScope() {
1406  CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1407  }
1408 
1409  private:
1410  CodeGenFunction &CGF;
1411  Address OldCXXDefaultInitExprThis;
1412  };
1413 
1414  /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1415  /// is overridden to be the object under construction.
1416  class CXXDefaultInitExprScope {
1417  public:
1418  CXXDefaultInitExprScope(CodeGenFunction &CGF)
1419  : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1420  OldCXXThisAlignment(CGF.CXXThisAlignment) {
1421  CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
1422  CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1423  }
1424  ~CXXDefaultInitExprScope() {
1425  CGF.CXXThisValue = OldCXXThisValue;
1426  CGF.CXXThisAlignment = OldCXXThisAlignment;
1427  }
1428 
1429  public:
1430  CodeGenFunction &CGF;
1431  llvm::Value *OldCXXThisValue;
1432  CharUnits OldCXXThisAlignment;
1433  };
1434 
1435  /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1436  /// current loop index is overridden.
1437  class ArrayInitLoopExprScope {
1438  public:
1439  ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1440  : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1441  CGF.ArrayInitIndex = Index;
1442  }
1443  ~ArrayInitLoopExprScope() {
1444  CGF.ArrayInitIndex = OldArrayInitIndex;
1445  }
1446 
1447  private:
1448  CodeGenFunction &CGF;
1449  llvm::Value *OldArrayInitIndex;
1450  };
1451 
1452  class InlinedInheritingConstructorScope {
1453  public:
1454  InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
1455  : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1456  OldCurCodeDecl(CGF.CurCodeDecl),
1457  OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1458  OldCXXABIThisValue(CGF.CXXABIThisValue),
1459  OldCXXThisValue(CGF.CXXThisValue),
1460  OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1461  OldCXXThisAlignment(CGF.CXXThisAlignment),
1462  OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1463  OldCXXInheritedCtorInitExprArgs(
1464  std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1465  CGF.CurGD = GD;
1466  CGF.CurFuncDecl = CGF.CurCodeDecl =
1467  cast<CXXConstructorDecl>(GD.getDecl());
1468  CGF.CXXABIThisDecl = nullptr;
1469  CGF.CXXABIThisValue = nullptr;
1470  CGF.CXXThisValue = nullptr;
1471  CGF.CXXABIThisAlignment = CharUnits();
1472  CGF.CXXThisAlignment = CharUnits();
1473  CGF.ReturnValue = Address::invalid();
1474  CGF.FnRetTy = QualType();
1475  CGF.CXXInheritedCtorInitExprArgs.clear();
1476  }
1477  ~InlinedInheritingConstructorScope() {
1478  CGF.CurGD = OldCurGD;
1479  CGF.CurFuncDecl = OldCurFuncDecl;
1480  CGF.CurCodeDecl = OldCurCodeDecl;
1481  CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1482  CGF.CXXABIThisValue = OldCXXABIThisValue;
1483  CGF.CXXThisValue = OldCXXThisValue;
1484  CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1485  CGF.CXXThisAlignment = OldCXXThisAlignment;
1486  CGF.ReturnValue = OldReturnValue;
1487  CGF.FnRetTy = OldFnRetTy;
1488  CGF.CXXInheritedCtorInitExprArgs =
1489  std::move(OldCXXInheritedCtorInitExprArgs);
1490  }
1491 
1492  private:
1493  CodeGenFunction &CGF;
1494  GlobalDecl OldCurGD;
1495  const Decl *OldCurFuncDecl;
1496  const Decl *OldCurCodeDecl;
1497  ImplicitParamDecl *OldCXXABIThisDecl;
1498  llvm::Value *OldCXXABIThisValue;
1499  llvm::Value *OldCXXThisValue;
1500  CharUnits OldCXXABIThisAlignment;
1501  CharUnits OldCXXThisAlignment;
1502  Address OldReturnValue;
1503  QualType OldFnRetTy;
1504  CallArgList OldCXXInheritedCtorInitExprArgs;
1505  };
1506 
1507 private:
1508  /// CXXThisDecl - When generating code for a C++ member function,
1509  /// this will hold the implicit 'this' declaration.
1510  ImplicitParamDecl *CXXABIThisDecl = nullptr;
1511  llvm::Value *CXXABIThisValue = nullptr;
1512  llvm::Value *CXXThisValue = nullptr;
1513  CharUnits CXXABIThisAlignment;
1514  CharUnits CXXThisAlignment;
1515 
1516  /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
1517  /// this expression.
1518  Address CXXDefaultInitExprThis = Address::invalid();
1519 
1520  /// The current array initialization index when evaluating an
1521  /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
1522  llvm::Value *ArrayInitIndex = nullptr;
1523 
1524  /// The values of function arguments to use when evaluating
1525  /// CXXInheritedCtorInitExprs within this context.
1526  CallArgList CXXInheritedCtorInitExprArgs;
1527 
1528  /// CXXStructorImplicitParamDecl - When generating code for a constructor or
1529  /// destructor, this will hold the implicit argument (e.g. VTT).
1530  ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
1531  llvm::Value *CXXStructorImplicitParamValue = nullptr;
1532 
1533  /// OutermostConditional - Points to the outermost active
1534  /// conditional control. This is used so that we know if a
1535  /// temporary should be destroyed conditionally.
1536  ConditionalEvaluation *OutermostConditional = nullptr;
1537 
1538  /// The current lexical scope.
1539  LexicalScope *CurLexicalScope = nullptr;
1540 
1541  /// The current source location that should be used for exception
1542  /// handling code.
1543  SourceLocation CurEHLocation;
1544 
1545  /// BlockByrefInfos - For each __block variable, contains
1546  /// information about the layout of the variable.
1547  llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
1548 
1549  /// Used by -fsanitize=nullability-return to determine whether the return
1550  /// value can be checked.
1551  llvm::Value *RetValNullabilityPrecondition = nullptr;
1552 
1553  /// Check if -fsanitize=nullability-return instrumentation is required for
1554  /// this function.
1555  bool requiresReturnValueNullabilityCheck() const {
1556  return RetValNullabilityPrecondition;
1557  }
1558 
1559  /// Used to store precise source locations for return statements by the
1560  /// runtime return value checks.
1561  Address ReturnLocation = Address::invalid();
1562 
1563  /// Check if the return value of this function requires sanitization.
1564  bool requiresReturnValueCheck() const {
1565  return requiresReturnValueNullabilityCheck() ||
1566  (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
1567  CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>());
1568  }
1569 
1570  llvm::BasicBlock *TerminateLandingPad = nullptr;
1571  llvm::BasicBlock *TerminateHandler = nullptr;
1572  llvm::BasicBlock *TrapBB = nullptr;
1573 
1574  /// Terminate funclets keyed by parent funclet pad.
1575  llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
1576 
1577  /// Largest vector width used in this function. Will be used to create a
1578  /// function attribute.
1579  unsigned LargestVectorWidth = 0;
1580 
1581  /// True if we need to emit lifetime markers.
1582  const bool ShouldEmitLifetimeMarkers;
1583 
1584  /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
1585  /// the function metadata.
1586  void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
1587  llvm::Function *Fn);
1588 
1589 public:
1590  CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
1591  ~CodeGenFunction();
1592 
1593  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1594  ASTContext &getContext() const { return CGM.getContext(); }
1595  CGDebugInfo *getDebugInfo() {
1596  if (DisableDebugInfo)
1597  return nullptr;
1598  return DebugInfo;
1599  }
1600  void disableDebugInfo() { DisableDebugInfo = true; }
1601  void enableDebugInfo() { DisableDebugInfo = false; }
1602 
1603  bool shouldUseFusedARCCalls() {
1604  return CGM.getCodeGenOpts().OptimizationLevel == 0;
1605  }
1606 
1607  const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
1608 
1609  /// Returns a pointer to the function's exception object and selector slots,
1610  /// which are assigned in every landing pad.
1611  Address getExceptionSlot();
1612  Address getEHSelectorSlot();
1613 
1614  /// Returns the contents of the function's exception object and selector
1615  /// slots.
1616  llvm::Value *getExceptionFromSlot();
1617  llvm::Value *getSelectorFromSlot();
1618 
1619  Address getNormalCleanupDestSlot();
1620 
1621  llvm::BasicBlock *getUnreachableBlock() {
1622  if (!UnreachableBlock) {
1623  UnreachableBlock = createBasicBlock("unreachable");
1624  new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1625  }
1626  return UnreachableBlock;
1627  }
1628 
1629  llvm::BasicBlock *getInvokeDest() {
1630  if (!EHStack.requiresLandingPad()) return nullptr;
1631  return getInvokeDestImpl();
1632  }
1633 
1634  bool currentFunctionUsesSEHTry() const { return CurSEHParent != nullptr; }
1635 
1636  const TargetInfo &getTarget() const { return Target; }
1637  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1638  const TargetCodeGenInfo &getTargetHooks() const {
1639  return CGM.getTargetCodeGenInfo();
1640  }
1641 
1642  //===--------------------------------------------------------------------===//
1643  // Cleanups
1644  //===--------------------------------------------------------------------===//
1645 
1646  typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
1647 
1648  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1649  Address arrayEndPointer,
1650  QualType elementType,
1651  CharUnits elementAlignment,
1652  Destroyer *destroyer);
1653  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1654  llvm::Value *arrayEnd,
1655  QualType elementType,
1656  CharUnits elementAlignment,
1657  Destroyer *destroyer);
1658 
1659  void pushDestroy(QualType::DestructionKind dtorKind,
1660  Address addr, QualType type);
1661  void pushEHDestroy(QualType::DestructionKind dtorKind,
1662  Address addr, QualType type);
1663  void pushDestroy(CleanupKind kind, Address addr, QualType type,
1664  Destroyer *destroyer, bool useEHCleanupForArray);
1665  void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
1666  QualType type, Destroyer *destroyer,
1667  bool useEHCleanupForArray);
1668  void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1669  llvm::Value *CompletePtr,
1670  QualType ElementType);
1671  void pushStackRestore(CleanupKind kind, Address SPMem);
1672  void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
1673  bool useEHCleanupForArray);
1674  llvm::Function *generateDestroyHelper(Address addr, QualType type,
1675  Destroyer *destroyer,
1676  bool useEHCleanupForArray,
1677  const VarDecl *VD);
1678  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1679  QualType elementType, CharUnits elementAlign,
1680  Destroyer *destroyer,
1681  bool checkZeroLength, bool useEHCleanup);
1682 
1683  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1684 
1685  /// Determines whether an EH cleanup is required to destroy a type
1686  /// with the given destruction kind.
1687  bool needsEHCleanup(QualType::DestructionKind kind) {
1688  switch (kind) {
1689  case QualType::DK_none:
1690  return false;
1691  case QualType::DK_cxx_destructor:
1692  case QualType::DK_objc_weak_lifetime:
1693  case QualType::DK_nontrivial_c_struct:
1694  return getLangOpts().Exceptions;
1695  case QualType::DK_objc_strong_lifetime:
1696  return getLangOpts().Exceptions &&
1697  CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1698  }
1699  llvm_unreachable("bad destruction kind");
1700  }
1701 
1702  CleanupKind getCleanupKind(QualType::DestructionKind kind) {
1703  return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1704  }
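  // Illustrative sketch (not part of the original header): after emitting the
  // storage for a local of type 'type' at address 'addr' (both placeholders),
  // callers typically schedule its destruction through the dtorKind-based
  // overload above:
  //
  //   if (QualType::DestructionKind dtorKind = type.isDestructedType())
  //     pushDestroy(dtorKind, addr, type);
  //
  // which derives the CleanupKind via getCleanupKind() and the Destroyer via
  // getDestroyer().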
1705 
1706  //===--------------------------------------------------------------------===//
1707  // Objective-C
1708  //===--------------------------------------------------------------------===//
1709 
1710  void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1711 
1712  void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
1713 
1714  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1715  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1716  const ObjCPropertyImplDecl *PID);
1717  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1718  const ObjCPropertyImplDecl *propImpl,
1719  const ObjCMethodDecl *GetterMethodDecl,
1720  llvm::Constant *AtomicHelperFn);
1721 
1722  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1723  ObjCMethodDecl *MD, bool ctor);
1724 
1725  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1726  /// for the given property.
1727  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1728  const ObjCPropertyImplDecl *PID);
1729  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1730  const ObjCPropertyImplDecl *propImpl,
1731  llvm::Constant *AtomicHelperFn);
1732 
1733  //===--------------------------------------------------------------------===//
1734  // Block Bits
1735  //===--------------------------------------------------------------------===//
1736 
1737  /// Emit block literal.
1738  /// \return an LLVM value which is a pointer to a struct which contains
1739  /// information about the block, including the block invoke function, the
1740  /// captured variables, etc.
1741  llvm::Value *EmitBlockLiteral(const BlockExpr *);
1742  static void destroyBlockInfos(CGBlockInfo *info);
1743 
1744  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1745  const CGBlockInfo &Info,
1746  const DeclMapTy &ldm,
1747  bool IsLambdaConversionToBlock,
1748  bool BuildGlobalBlock);
1749 
1750  /// Check if \p T is a C++ class that has a destructor that can throw.
1751  static bool cxxDestructorCanThrow(QualType T);
1752 
1753  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1754  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1755  llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1756  const ObjCPropertyImplDecl *PID);
1757  llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1758  const ObjCPropertyImplDecl *PID);
1759  llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1760 
1761  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
1762  bool CanThrow);
1763 
1764  class AutoVarEmission;
1765 
1766  void emitByrefStructureInit(const AutoVarEmission &emission);
1767 
1768  /// Enter a cleanup to destroy a __block variable. Note that this
1769  /// cleanup should be a no-op if the variable hasn't left the stack
1770  /// yet; if a cleanup is required for the variable itself, that needs
1771  /// to be done externally.
1772  ///
1773  /// \param Kind Cleanup kind.
1774  ///
1775  /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
1776  /// structure that will be passed to _Block_object_dispose. When
1777  /// \p LoadBlockVarAddr is true, the address of the field of the block
1778  /// structure that holds the address of the __block structure.
1779  ///
1780  /// \param Flags The flag that will be passed to _Block_object_dispose.
1781  ///
1782  /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
1783  /// \p Addr to get the address of the __block structure.
1784  void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
1785  bool LoadBlockVarAddr, bool CanThrow);
1786 
1787  void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
1788  llvm::Value *ptr);
1789 
1790  Address LoadBlockStruct();
1791  Address GetAddrOfBlockDecl(const VarDecl *var);
1792 
1793  /// emitBlockByrefAddress - Computes the location of the
1794  /// data in a variable which is declared as __block.
1795  Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
1796  bool followForward = true);
1797  Address emitBlockByrefAddress(Address baseAddr,
1798  const BlockByrefInfo &info,
1799  bool followForward,
1800  const llvm::Twine &name);
1801 
1802  const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
1803 
1804  QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args);
1805 
1806  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1807  const CGFunctionInfo &FnInfo);
1808 
1809  /// Annotate the function with an attribute that disables TSan checking at
1810  /// runtime.
1811  void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
1812 
1813  /// Emit code for the start of a function.
1814  /// \param Loc The location to be associated with the function.
1815  /// \param StartLoc The location of the function body.
1816  void StartFunction(GlobalDecl GD,
1817  QualType RetTy,
1818  llvm::Function *Fn,
1819  const CGFunctionInfo &FnInfo,
1820  const FunctionArgList &Args,
1821  SourceLocation Loc = SourceLocation(),
1822  SourceLocation StartLoc = SourceLocation());
1823 
1824  static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);
1825 
1826  void EmitConstructorBody(FunctionArgList &Args);
1827  void EmitDestructorBody(FunctionArgList &Args);
1828  void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
1829  void EmitFunctionBody(const Stmt *Body);
1830  void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
1831 
1832  void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
1833  CallArgList &CallArgs);
1834  void EmitLambdaBlockInvokeBody();
1835  void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1836  void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
1837  void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV) {
1838  EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
1839  }
1840  void EmitAsanPrologueOrEpilogue(bool Prologue);
1841 
1842  /// Emit the unified return block, trying to avoid its emission when
1843  /// possible.
1844  /// \return The debug location of the user written return statement if the
1845  /// return block is avoided.
1846  llvm::DebugLoc EmitReturnBlock();
1847 
1848  /// FinishFunction - Complete IR generation of the current function. It is
1849  /// legal to call this function even if there is no current insertion point.
1850  void FinishFunction(SourceLocation EndLoc=SourceLocation());
1851 
1852  void StartThunk(llvm::Function *Fn, GlobalDecl GD,
1853  const CGFunctionInfo &FnInfo, bool IsUnprototyped);
1854 
1855  void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
1856  const ThunkInfo *Thunk, bool IsUnprototyped);
1857 
1858  void FinishThunk();
1859 
1860  /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
1861  void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
1862  llvm::FunctionCallee Callee);
1863 
1864  /// Generate a thunk for the given method.
1865  void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1866  GlobalDecl GD, const ThunkInfo &Thunk,
1867  bool IsUnprototyped);
1868 
1869  llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
1870  const CGFunctionInfo &FnInfo,
1871  GlobalDecl GD, const ThunkInfo &Thunk);
1872 
1873  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1874  FunctionArgList &Args);
1875 
1876  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
1877 
1878  /// Struct with all information about dynamic [sub]class needed to set vptr.
1879  struct VPtr {
1880  BaseSubobject Base;
1881  const CXXRecordDecl *NearestVBase;
1882  CharUnits OffsetFromNearestVBase;
1883  const CXXRecordDecl *VTableClass;
1884  };
1885 
1886  /// Initialize the vtable pointer of the given subobject.
1887  void InitializeVTablePointer(const VPtr &vptr);
1888 
1889  typedef llvm::SmallVector<VPtr, 4> VPtrsVector;
1890 
1891  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1892  VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);
1893 
1894  void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
1895  CharUnits OffsetFromNearestVBase,
1896  bool BaseIsNonVirtualPrimaryBase,
1897  const CXXRecordDecl *VTableClass,
1898  VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
1899 
1900  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1901 
1902  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1903  /// to by This.
1904  llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy,
1905  const CXXRecordDecl *VTableClass);
1906 
1915  };
1916 
1917  /// Derived is the presumed address of an object of type T after a
1918  /// cast. If T is a polymorphic class type, emit a check that the virtual
1919  /// table for Derived belongs to a class derived from T.
1920  void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
1921  bool MayBeNull, CFITypeCheckKind TCK,
1922  SourceLocation Loc);
1923 
1924  /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
1925  /// If vptr CFI is enabled, emit a check that VTable is valid.
1926  void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
1927  CFITypeCheckKind TCK, SourceLocation Loc);
1928 
1929  /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
1930  /// RD using llvm.type.test.
1931  void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
1932  CFITypeCheckKind TCK, SourceLocation Loc);
1933 
1934  /// If whole-program virtual table optimization is enabled, emit an assumption
1935  /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
1936  /// enabled, emit a check that VTable is a member of RD's type identifier.
1937  void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
1938  llvm::Value *VTable, SourceLocation Loc);
1939 
1940  /// Returns whether we should perform a type checked load when loading a
1941  /// virtual function for virtual calls to members of RD. This is generally
1942  /// true when both vcall CFI and whole-program-vtables are enabled.
1943  bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
1944 
1945  /// Emit a type checked load from the given vtable.
1946  llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable,
1947  uint64_t VTableByteOffset);
1948 
1949  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1950  /// given phase of destruction for a destructor. The end result
1951  /// should call destructors on members and base classes in reverse
1952  /// order of their construction.
1953  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1954 
1955  /// ShouldInstrumentFunction - Return true if the current function should be
1956  /// instrumented with __cyg_profile_func_* calls
1957  bool ShouldInstrumentFunction();
1958 
1959  /// ShouldXRayInstrumentFunction - Return true if the current function should be
1960  /// instrumented with XRay nop sleds.
1961  bool ShouldXRayInstrumentFunction() const;
1962 
1963  /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
1964  /// XRay custom event handling calls.
1965  bool AlwaysEmitXRayCustomEvents() const;
1966 
1967  /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
1968  /// XRay typed event handling calls.
1969  bool AlwaysEmitXRayTypedEvents() const;
1970 
1971  /// Encode an address into a form suitable for use in a function prologue.
1972  llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F,
1973  llvm::Constant *Addr);
1974 
1975  /// Decode an address used in a function prologue, encoded by \c
1976  /// EncodeAddrForUseInPrologue.
1977  llvm::Value *DecodeAddrUsedInPrologue(llvm::Value *F,
1978  llvm::Value *EncodedAddr);
1979 
1980  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1981  /// arguments for the given function. This is also responsible for naming the
1982  /// LLVM function arguments.
1983  void EmitFunctionProlog(const CGFunctionInfo &FI,
1984  llvm::Function *Fn,
1985  const FunctionArgList &Args);
1986 
1987  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1988  /// given temporary.
1989  void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
1990  SourceLocation EndLoc);
1991 
1992  /// Emit a test that checks if the return value \p RV is nonnull.
1993  void EmitReturnValueCheck(llvm::Value *RV);
1994 
1995  /// EmitStartEHSpec - Emit the start of the exception spec.
1996  void EmitStartEHSpec(const Decl *D);
1997 
1998  /// EmitEndEHSpec - Emit the end of the exception spec.
1999  void EmitEndEHSpec(const Decl *D);
2000 
2001  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2002  llvm::BasicBlock *getTerminateLandingPad();
2003 
2004  /// getTerminateFunclet - Return a cleanup funclet that just calls
2005  /// terminate.
2006  llvm::BasicBlock *getTerminateFunclet();
2007 
2008  /// getTerminateHandler - Return a handler (not a landing pad, just
2009  /// a catch handler) that just calls terminate. This is used when
2010  /// a terminate scope encloses a try.
2011  llvm::BasicBlock *getTerminateHandler();
2012 
2013  llvm::Type *ConvertTypeForMem(QualType T);
2014  llvm::Type *ConvertType(QualType T);
2015  llvm::Type *ConvertType(const TypeDecl *T) {
2016  return ConvertType(getContext().getTypeDeclType(T));
2017  }
2018 
2019  /// LoadObjCSelf - Load the value of self. This function is only valid while
2020  /// generating code for an Objective-C method.
2021  llvm::Value *LoadObjCSelf();
2022 
2023  /// TypeOfSelfObject - Return type of object that this self represents.
2024  QualType TypeOfSelfObject();
2025 
2026  /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2027  static TypeEvaluationKind getEvaluationKind(QualType T);
2028 
2029  static bool hasScalarEvaluationKind(QualType T) {
2030  return getEvaluationKind(T) == TEK_Scalar;
2031  }
2032 
2033  static bool hasAggregateEvaluationKind(QualType T) {
2034  return getEvaluationKind(T) == TEK_Aggregate;
2035  }
2036 
2037  /// createBasicBlock - Create an LLVM basic block.
2038  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
2039  llvm::Function *parent = nullptr,
2040  llvm::BasicBlock *before = nullptr) {
2041  return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
2042  }
2043 
2044  /// getJumpDestForLabel - Return the JumpDest for the LLVM basic block that
2045  /// the specified label maps to.
2046  JumpDest getJumpDestForLabel(const LabelDecl *S);
2047 
2048  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2049  /// another basic block, simplify it. This assumes that no other code could
2050  /// potentially reference the basic block.
2051  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2052 
2053  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2054  /// adding a fall-through branch from the current insert block if
2055  /// necessary. It is legal to call this function even if there is no current
2056  /// insertion point.
2057  ///
2058  /// IsFinished - If true, indicates that the caller has finished emitting
2059  /// branches to the given block and does not expect to emit code into it. This
2060  /// means the block can be ignored if it is unreachable.
2061  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
2062 
2063  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2064  /// near its uses, and leave the insertion point in it.
2065  void EmitBlockAfterUses(llvm::BasicBlock *BB);
2066 
2067  /// EmitBranch - Emit a branch to the specified basic block from the current
2068  /// insert block, taking care to avoid creation of branches from dummy
2069  /// blocks. It is legal to call this function even if there is no current
2070  /// insertion point.
2071  ///
2072  /// This function clears the current insertion point. The caller should follow
2073  /// calls to this function with calls to Emit*Block prior to generating new
2074  /// code.
2075  void EmitBranch(llvm::BasicBlock *Block);
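  // Illustrative sketch (not part of the original header): the usual pattern
  // for emitting structured control flow with these helpers, e.g. for an
  // if-statement without an else ('CondV' is a placeholder i1 value):
  //
  //   llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  //   llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  //   Builder.CreateCondBr(CondV, ThenBlock, ContBlock);
  //   EmitBlock(ThenBlock);
  //   ... emit the 'then' body ...
  //   EmitBranch(ContBlock);
  //   EmitBlock(ContBlock, /*IsFinished=*/true);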
2076 
2077  /// HaveInsertPoint - True if an insertion point is defined. If not, this
2078  /// indicates that the current code being emitted is unreachable.
2079  bool HaveInsertPoint() const {
2080  return Builder.GetInsertBlock() != nullptr;
2081  }
2082 
2083  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2084  /// emitted IR has a place to go. Note that by definition, if this function
2085  /// creates a block then that block is unreachable; callers may do better to
2086  /// detect when no insertion point is defined and simply skip IR generation.
2087  void EnsureInsertPoint() {
2088  if (!HaveInsertPoint())
2089  EmitBlock(createBasicBlock());
2090  }
2091 
2092  /// ErrorUnsupported - Print out an error that codegen doesn't support the
2093  /// specified stmt yet.
2094  void ErrorUnsupported(const Stmt *S, const char *Type);
2095 
2096  //===--------------------------------------------------------------------===//
2097  // Helpers
2098  //===--------------------------------------------------------------------===//
2099 
2100  LValue MakeAddrLValue(Address Addr, QualType T,
2101  AlignmentSource Source = AlignmentSource::Type) {
2102  return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2103  CGM.getTBAAAccessInfo(T));
2104  }
2105 
2106  LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo,
2107  TBAAAccessInfo TBAAInfo) {
2108  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2109  }
2110 
2111  LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2112  AlignmentSource Source = AlignmentSource::Type) {
2113  return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
2114  LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
2115  }
2116 
2117  LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2118  LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
2119  return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
2120  BaseInfo, TBAAInfo);
2121  }
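  // Illustrative sketch (not part of the original header): most callers wrap a
  // freshly computed address in an LValue via one of the overloads above, e.g.
  //
  //   LValue LV = MakeAddrLValue(VarAddr, VarTy, AlignmentSource::Decl);
  //
  // where 'VarAddr' and 'VarTy' are placeholders for the address and type of a
  // declared variable; the overload fills in TBAA information from CGM.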
2122 
2123  LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
2124  LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
2125  CharUnits getNaturalTypeAlignment(QualType T,
2126  LValueBaseInfo *BaseInfo = nullptr,
2127  TBAAAccessInfo *TBAAInfo = nullptr,
2128  bool forPointeeType = false);
2129  CharUnits getNaturalPointeeTypeAlignment(QualType T,
2130  LValueBaseInfo *BaseInfo = nullptr,
2131  TBAAAccessInfo *TBAAInfo = nullptr);
2132 
2133  Address EmitLoadOfReference(LValue RefLVal,
2134  LValueBaseInfo *PointeeBaseInfo = nullptr,
2135  TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2136  LValue EmitLoadOfReferenceLValue(LValue RefLVal);
2137  LValue EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy,
2138  AlignmentSource Source =
2139  AlignmentSource::Type) {
2140  LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2141  CGM.getTBAAAccessInfo(RefTy));
2142  return EmitLoadOfReferenceLValue(RefLVal);
2143  }
2144 
2145  Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
2146  LValueBaseInfo *BaseInfo = nullptr,
2147  TBAAAccessInfo *TBAAInfo = nullptr);
2148  LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
2149 
2150  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2151  /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2152  /// insertion point of the builder. The caller is responsible for setting an
2153  /// appropriate alignment on
2154  /// the alloca.
2155  ///
2156  /// \p ArraySize is the number of array elements to be allocated if it
2157  /// is not nullptr.
2158  ///
2159  /// LangAS::Default is the address space of pointers to local variables and
2160  /// temporaries, as exposed in the source language. In certain
2161  /// configurations, this is not the same as the alloca address space, and a
2162  /// cast is needed to lift the pointer from the alloca AS into
2163  /// LangAS::Default. This can happen when the target uses a restricted
2164  /// address space for the stack but the source language requires
2165  /// LangAS::Default to be a generic address space. The latter condition is
2166  /// common for most programming languages; OpenCL is an exception in that
2167  /// LangAS::Default is the private address space, which naturally maps
2168  /// to the stack.
2169  ///
2170  /// Because the address of a temporary is often exposed to the program in
2171  /// various ways, this function will perform the cast. The original alloca
2172  /// instruction is returned through \p Alloca if it is not nullptr.
2173  ///
2174  /// The cast is not performed in CreateTempAllocaWithoutCast. This is
2175  /// more efficient if the caller knows that the address will not be exposed.
2176  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2177  llvm::Value *ArraySize = nullptr);
2178  Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
2179  const Twine &Name = "tmp",
2180  llvm::Value *ArraySize = nullptr,
2181  Address *Alloca = nullptr);
2182  Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
2183  const Twine &Name = "tmp",
2184  llvm::Value *ArraySize = nullptr);
2185 
2186  /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2187  /// default ABI alignment of the given LLVM type.
2188  ///
2189  /// IMPORTANT NOTE: This is *not* generally the right alignment for
2190  /// any given AST type that happens to have been lowered to the
2191  /// given IR type. This should only ever be used for function-local,
2192  /// IR-driven manipulations like saving and restoring a value. Do
2193  /// not hand this address off to arbitrary IRGen routines, and especially
2194  /// do not pass it as an argument to a function that might expect a
2195  /// properly ABI-aligned value.
2196  Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
2197  const Twine &Name = "tmp");
2198 
2199  /// InitTempAlloca - Provide an initial value for the given alloca which
2200  /// will be observable at all locations in the function.
2201  ///
2202  /// The address should be something that was returned from one of
2203  /// the CreateTempAlloca or CreateMemTemp routines, and the
2204  /// initializer must be valid in the entry block (i.e. it must
2205  /// either be a constant or an argument value).
2206  void InitTempAlloca(Address Alloca, llvm::Value *Value);
2207 
2208  /// CreateIRTemp - Create a temporary IR object of the given type, with
2209  /// appropriate alignment. This routine should only be used when a temporary
2210  /// value needs to be stored into an alloca (for example, to avoid explicit
2211  /// PHI construction), but the type is the IR type, not the type appropriate
2212  /// for storing in memory.
2213  ///
2214  /// That is, this is exactly equivalent to CreateMemTemp, but calling
2215  /// ConvertType instead of ConvertTypeForMem.
2216  Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
2217 
2218  /// CreateMemTemp - Create a temporary memory object of the given type, with
2219  /// appropriate alignment and cast it to the default address space. Returns
2220  /// the original alloca instruction by \p Alloca if it is not nullptr.
2221  Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
2222  Address *Alloca = nullptr);
2223  Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
2224  Address *Alloca = nullptr);
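  // Illustrative sketch (not part of the original header): materializing an
  // expression 'E' of type 'Ty' (both placeholders) into a fresh temporary
  // usually looks like
  //
  //   Address Tmp = CreateMemTemp(Ty, "agg.tmp");
  //   EmitAnyExprToMem(E, Tmp, Ty.getQualifiers(), /*IsInitializer=*/false);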
2225 
2226  /// CreateMemTemp - Create a temporary memory object of the given type, with
2227  /// appropriate alignment without casting it to the default address space.
2228  Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2229  Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
2230  const Twine &Name = "tmp");
2231 
2232  /// CreateAggTemp - Create a temporary memory object for the given
2233  /// aggregate type.
2234  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
2235  return AggValueSlot::forAddr(CreateMemTemp(T, Name),
2236  T.getQualifiers(),
2237  AggValueSlot::IsNotDestructed,
2238  AggValueSlot::DoesNotNeedGCBarriers,
2239  AggValueSlot::IsNotAliased,
2240  AggValueSlot::DoesNotOverlap);
2241  }
2242 
2243  /// Emit a cast to void* in the appropriate address space.
2244  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
2245 
2246  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2247  /// expression and compare the result against zero, returning an Int1Ty value.
2248  llvm::Value *EvaluateExprAsBool(const Expr *E);
2249 
2250  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2251  void EmitIgnoredExpr(const Expr *E);
2252 
2253  /// EmitAnyExpr - Emit code to compute the specified expression which can have
2254  /// any type. The result is returned as an RValue struct. If this is an
2255  /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2256  /// the result should be returned.
2257  ///
2258  /// \param ignoreResult True if the resulting value isn't used.
2259  RValue EmitAnyExpr(const Expr *E,
2260  AggValueSlot aggSlot = AggValueSlot::ignored(),
2261  bool ignoreResult = false);
2262 
2263  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2264  // or the value of the expression, depending on how va_list is defined.
2265  Address EmitVAListRef(const Expr *E);
2266 
2267  /// Emit a "reference" to a __builtin_ms_va_list; this is
2268  /// always the value of the expression, because a __builtin_ms_va_list is a
2269  /// pointer to a char.
2270  Address EmitMSVAListRef(const Expr *E);
2271 
2272  /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
2273  /// always be accessible even if no aggregate location is provided.
2274  RValue EmitAnyExprToTemp(const Expr *E);
2275 
2276  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2277  /// arbitrary expression into the given memory location.
2278  void EmitAnyExprToMem(const Expr *E, Address Location,
2279  Qualifiers Quals, bool IsInitializer);
2280 
2281  void EmitAnyExprToExn(const Expr *E, Address Addr);
2282 
2283  /// EmitExprAsInit - Emits the code necessary to initialize a
2284  /// location in memory with the given initializer.
2285  void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2286  bool capturedByInit);
2287 
2288  /// hasVolatileMember - returns true if the aggregate type has a volatile
2289  /// member.
2290  bool hasVolatileMember(QualType T) {
2291  if (const RecordType *RT = T->getAs<RecordType>()) {
2292  const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
2293  return RD->hasVolatileMember();
2294  }
2295  return false;
2296  }
2297 
2298  /// Determine whether a return value slot may overlap some other object.
2299  AggValueSlot::Overlap_t overlapForReturnValue() {
2300  // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2301  // class subobjects. These cases may need to be revisited depending on the
2302  // resolution of the relevant core issue.
2303  return AggValueSlot::DoesNotOverlap;
2304  }
2305 
2306  /// Determine whether a field initialization may overlap some other object.
2307  AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD) {
2308  // FIXME: These cases can result in overlap as a result of P0840R0's
2309  // [[no_unique_address]] attribute. We can still infer NoOverlap in the
2310  // presence of that attribute if the field is within the nvsize of its
2311  // containing class, because non-virtual subobjects are initialized in
2312  // address order.
2313  return AggValueSlot::DoesNotOverlap;
2314  }
2315 
2316  /// Determine whether a base class initialization may overlap some other
2317  /// object.
2318  AggValueSlot::Overlap_t overlapForBaseInit(const CXXRecordDecl *RD,
2319  const CXXRecordDecl *BaseRD,
2320  bool IsVirtual);
2321 
2322  /// Emit an aggregate assignment.
2323  void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
2324  bool IsVolatile = hasVolatileMember(EltTy);
2325  EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
2326  }
2327 
2328  void EmitAggregateCopyCtor(LValue Dest, LValue Src,
2329  AggValueSlot::Overlap_t MayOverlap) {
2330  EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
2331  }
2332 
2333  /// EmitAggregateCopy - Emit an aggregate copy.
2334  ///
2335  /// \param isVolatile \c true iff either the source or the destination is
2336  /// volatile.
2337  /// \param MayOverlap Whether the tail padding of the destination might be
2338  /// occupied by some other object. More efficient code can often be
2339  /// generated if not.
2340  void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
2341  AggValueSlot::Overlap_t MayOverlap,
2342  bool isVolatile = false);
2343 
2344  /// GetAddrOfLocalVar - Return the address of a local variable.
2345  Address GetAddrOfLocalVar(const VarDecl *VD) {
2346  auto it = LocalDeclMap.find(VD);
2347  assert(it != LocalDeclMap.end() &&
2348  "Invalid argument to GetAddrOfLocalVar(), no decl!");
2349  return it->second;
2350  }
2351 
2352  /// Given an opaque value expression, return its LValue mapping if it exists,
2353  /// otherwise create one.
2354  LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
2355 
2356  /// Given an opaque value expression, return its RValue mapping if it exists,
2357  /// otherwise create one.
2358  RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
2359 
2360  /// Get the index of the current ArrayInitLoopExpr, if any.
2361  llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
2362 
2363  /// getAccessedFieldNo - Given an encoded value and a result number, return
2364  /// the input field number being accessed.
2365  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
2366 
2367  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
2368  llvm::BasicBlock *GetIndirectGotoBlock();
2369 
2370  /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
2371  static bool IsWrappedCXXThis(const Expr *E);
2372 
2373  /// EmitNullInitialization - Generate code to set a value of the given type to
2374  /// null. If the type contains data member pointers, they will be initialized
2375  /// to -1 in accordance with the Itanium C++ ABI.
2376  void EmitNullInitialization(Address DestPtr, QualType Ty);
2377 
2378  /// Emits a call to an LLVM variable-argument intrinsic, either
2379  /// \c llvm.va_start or \c llvm.va_end.
2380  /// \param ArgValue A reference to the \c va_list as emitted by either
2381  /// \c EmitVAListRef or \c EmitMSVAListRef.
2382  /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
2383  /// calls \c llvm.va_end.
2384  llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
2385 
2386  /// Generate code to get an argument from the passed in pointer
2387  /// and update it accordingly.
2388  /// \param VE The \c VAArgExpr for which to generate code.
2389  /// \param VAListAddr Receives a reference to the \c va_list as emitted by
2390  /// either \c EmitVAListRef or \c EmitMSVAListRef.
2391  /// \returns A pointer to the argument.
2392  // FIXME: We should be able to get rid of this method and use the va_arg
2393  // instruction in LLVM instead once it works well enough.
2394  Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr);
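  // Illustrative sketch (not part of the original header): when lowering a
  // va_arg expression 'VE' (placeholder), VAListAddr acts as an out-parameter
  // that receives the emitted va_list address:
  //
  //   Address VAListAddr = Address::invalid();
  //   Address ArgAddr = EmitVAArg(VE, VAListAddr);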
2395 
2396  /// emitArrayLength - Compute the length of an array, even if it's a
2397  /// VLA, and drill down to the base element type.
2398  llvm::Value *emitArrayLength(const ArrayType *arrayType,
2399  QualType &baseType,
2400  Address &addr);
2401 
2402  /// EmitVLASize - Capture all the sizes for the VLA expressions in
2403  /// the given variably-modified type and store them in the VLASizeMap.
2404  ///
2405  /// This function can be called with a null (unreachable) insert point.
2406  void EmitVariablyModifiedType(QualType Ty);
2407 
2408  struct VlaSizePair {
2409  llvm::Value *NumElts;
2410  QualType Type;
2411 
2412  VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
2413  };
2414 
2415  /// Return the number of elements for a single dimension
2416  /// for the given array type.
2417  VlaSizePair getVLAElements1D(const VariableArrayType *vla);
2418  VlaSizePair getVLAElements1D(QualType vla);
2419 
2420  /// Returns an LLVM value that corresponds to the size,
2421  /// in non-variably-sized elements, of a variable length array type,
2422  /// plus that largest non-variably-sized element type. Assumes that
2423  /// the type has already been emitted with EmitVariablyModifiedType.
2424  VlaSizePair getVLASize(const VariableArrayType *vla);
2425  VlaSizePair getVLASize(QualType vla);
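  // Illustrative sketch (not part of the original header): once
  // EmitVariablyModifiedType() has run for the type, the runtime size of a VLA
  // 'VAT' (placeholder) is available as
  //
  //   auto VlaSize = getVLASize(VAT);
  //   llvm::Value *NumElts = VlaSize.NumElts; // product of the variable dims
  //   QualType EltTy = VlaSize.Type;          // largest non-variable element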
2426 
2427  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
2428  /// generating code for a C++ member function.
2429  llvm::Value *LoadCXXThis() {
2430  assert(CXXThisValue && "no 'this' value for this function");
2431  return CXXThisValue;
2432  }
2433  Address LoadCXXThisAddress();
2434 
2435  /// LoadCXXVTT - Load the VTT parameter for base constructors/destructors
2436  /// that have virtual bases.
2437  // FIXME: Every place that calls LoadCXXVTT is something
2438  // that needs to be abstracted properly.
2439  llvm::Value *LoadCXXVTT() {
2440  assert(CXXStructorImplicitParamValue && "no VTT value for this function");
2441  return CXXStructorImplicitParamValue;
2442  }
2443 
2444  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
2445  /// complete class to the given direct base.
2446  Address
2447  GetAddressOfDirectBaseInCompleteClass(Address Value,
2448  const CXXRecordDecl *Derived,
2449  const CXXRecordDecl *Base,
2450  bool BaseIsVirtual);
2451 
2452  static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
2453 
2454  /// GetAddressOfBaseClass - This function will add the necessary delta to the
2455  /// load of 'this' and returns address of the base class.
2456  Address GetAddressOfBaseClass(Address Value,
2457  const CXXRecordDecl *Derived,
2458  CastExpr::path_const_iterator PathBegin,
2459  CastExpr::path_const_iterator PathEnd,
2460  bool NullCheckValue, SourceLocation Loc);
2461 
2462  Address GetAddressOfDerivedClass(Address Value,
2463  const CXXRecordDecl *Derived,
2464  CastExpr::path_const_iterator PathBegin,
2465  CastExpr::path_const_iterator PathEnd,
2466  bool NullCheckValue);
2467 
2468  /// GetVTTParameter - Return the VTT parameter that should be passed to a
2469  /// base constructor/destructor with virtual bases.
2470  /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
2471  /// to ItaniumCXXABI.cpp together with all the references to VTT.
2472  llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
2473  bool Delegating);
2474 
2475  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
2476  CXXCtorType CtorType,
2477  const FunctionArgList &Args,
2478  SourceLocation Loc);
2479  // It's important not to confuse this and the previous function. Delegating
2480  // constructors are a C++11 feature. The constructor delegate optimization
2481  // is used to reduce duplication in the base and complete constructors where
2482  // they are substantially the same.
2483  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
2484  const FunctionArgList &Args);
2485 
2486  /// Emit a call to an inheriting constructor (that is, one that invokes a
2487  /// constructor inherited from a base class) by inlining its definition. This
2488  /// is necessary if the ABI does not support forwarding the arguments to the
2489  /// base class constructor (because they're variadic or similar).
2490  void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor,
2491  CXXCtorType CtorType,
2492  bool ForVirtualBase,
2493  bool Delegating,
2494  CallArgList &Args);
2495 
2496  /// Emit a call to a constructor inherited from a base class, passing the
2497  /// current constructor's arguments along unmodified (without even making
2498  /// a copy).
2499  void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D,
2500  bool ForVirtualBase, Address This,
2501  bool InheritedFromVBase,
2502  const CXXInheritedCtorInitExpr *E);
2503 
2504  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
2505  bool ForVirtualBase, bool Delegating,
2506  Address This, const CXXConstructExpr *E,
2507  AggValueSlot::Overlap_t Overlap,
2508  bool NewPointerIsChecked);
2509 
2510  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
2511  bool ForVirtualBase, bool Delegating,
2512  Address This, CallArgList &Args,
2513  AggValueSlot::Overlap_t Overlap,
2514  SourceLocation Loc,
2515  bool NewPointerIsChecked);
2516 
2517  /// Emit assumption load for all bases. Must be called only on the
2518  /// most-derived class and not while the object is under construction.
2519  void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
2520 
2521  /// Emit assumption that vptr load == global vtable.
2522  void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
2523 
2524  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
2525  Address This, Address Src,
2526  const CXXConstructExpr *E);
2527 
2528  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
2529  const ArrayType *ArrayTy,
2530  Address ArrayPtr,
2531  const CXXConstructExpr *E,
2532  bool NewPointerIsChecked,
2533  bool ZeroInitialization = false);
2534 
2535  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
2536  llvm::Value *NumElements,
2537  Address ArrayPtr,
2538  const CXXConstructExpr *E,
2539  bool NewPointerIsChecked,
2540  bool ZeroInitialization = false);
2541 
2542  static Destroyer destroyCXXObject;
2543 
2544  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
2545  bool ForVirtualBase, bool Delegating,
2546  Address This);
2547 
2548  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
2549  llvm::Type *ElementTy, Address NewPtr,
2550  llvm::Value *NumElements,
2551  llvm::Value *AllocSizeWithoutCookie);
2552 
2553  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
2554  Address Ptr);
2555 
2556  llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
2557  void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
2558 
2559  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
2560  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
2561 
2562  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
2563  QualType DeleteTy, llvm::Value *NumElements = nullptr,
2564  CharUnits CookieSize = CharUnits());
2565 
2566  RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
2567  const CallExpr *TheCallExpr, bool IsDelete);
2568 
2569  llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
2570  llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
2571  Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
2572 
2573  /// Situations in which we might emit a check for the suitability of a
2574  /// pointer or glvalue.
2575  enum TypeCheckKind {
2576  /// Checking the operand of a load. Must be suitably sized and aligned.
2577  TCK_Load,
2578  /// Checking the destination of a store. Must be suitably sized and aligned.
2579  TCK_Store,
2580  /// Checking the bound value in a reference binding. Must be suitably sized
2581  /// and aligned, but is not required to refer to an object (until the
2582  /// reference is used), per core issue 453.
2583  TCK_ReferenceBinding,
2584  /// Checking the object expression in a non-static data member access. Must
2585  /// be an object within its lifetime.
2586  TCK_MemberAccess,
2587  /// Checking the 'this' pointer for a call to a non-static member function.
2588  /// Must be an object within its lifetime.
2589  TCK_MemberCall,
2590  /// Checking the 'this' pointer for a constructor call.
2591  TCK_ConstructorCall,
2592  /// Checking the operand of a static_cast to a derived pointer type. Must be
2593  /// null or an object within its lifetime.
2594  TCK_DowncastPointer,
2595  /// Checking the operand of a static_cast to a derived reference type. Must
2596  /// be an object within its lifetime.
2597  TCK_DowncastReference,
2598  /// Checking the operand of a cast to a base object. Must be suitably sized
2599  /// and aligned.
2600  TCK_Upcast,
2601  /// Checking the operand of a cast to a virtual base object. Must be an
2602  /// object within its lifetime.
2603  TCK_UpcastToVirtualBase,
2604  /// Checking the value assigned to a _Nonnull pointer. Must not be null.
2605  TCK_NonnullAssign,
2606  /// Checking the operand of a dynamic_cast or a typeid expression. Must be
2607  /// null or an object within its lifetime.
2608  TCK_DynamicOperation
2609  };
2610 
2611  /// Determine whether the pointer type check \p TCK permits null pointers.
2612  static bool isNullPointerAllowed(TypeCheckKind TCK);
2613 
2614  /// Determine whether the pointer type check \p TCK requires a vptr check.
2615  static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);
2616 
2617  /// Whether any type-checking sanitizers are enabled. If \c false,
2618  /// calls to EmitTypeCheck can be skipped.
2619  bool sanitizePerformTypeCheck() const;
2620 
2621  /// Emit a check that \p V is the address of storage of the
2622  /// appropriate size and alignment for an object of type \p Type
2623  /// (or if ArraySize is provided, for an array of that bound).
2624  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
2625  QualType Type, CharUnits Alignment = CharUnits::Zero(),
2626  SanitizerSet SkippedChecks = SanitizerSet(),
2627  llvm::Value *ArraySize = nullptr);
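  // Illustrative sketch (not part of the original header): a typical call site
  // guards the check on sanitizePerformTypeCheck(); 'Ptr', 'PointeeTy' and
  // 'Alignment' are placeholders for the pointer being loaded, its pointee
  // type, and the known alignment:
  //
  //   if (sanitizePerformTypeCheck())
  //     EmitTypeCheck(TCK_Load, E->getExprLoc(), Ptr, PointeeTy, Alignment);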
2628 
2629  /// Emit a check that \p Base points into an array object, which
2630  /// we can access at index \p Index. \p Accessed should be \c false if
2631  /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
2632  void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
2633  QualType IndexType, bool Accessed);
2634 
2635  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2636  bool isInc, bool isPre);
2637  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
2638  bool isInc, bool isPre);
2639 
2640  /// Converts Location to a DebugLoc, if debug information is enabled.
2641  llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
2642 
2643 
2644  //===--------------------------------------------------------------------===//
2645  // Declaration Emission
2646  //===--------------------------------------------------------------------===//
2647 
2648  /// EmitDecl - Emit a declaration.
2649  ///
2650  /// This function can be called with a null (unreachable) insert point.
2651  void EmitDecl(const Decl &D);
2652 
2653  /// EmitVarDecl - Emit a local variable declaration.
2654  ///
2655  /// This function can be called with a null (unreachable) insert point.
2656  void EmitVarDecl(const VarDecl &D);
2657 
2658  void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2659  bool capturedByInit);
2660 
2661  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
2662  llvm::Value *Address);
2663 
2664  /// Determine whether the given initializer is trivial in the sense
2665  /// that it requires no code to be generated.
2666  bool isTrivialInitializer(const Expr *Init);
2667 
2668  /// EmitAutoVarDecl - Emit an auto variable declaration.
2669  ///
2670  /// This function can be called with a null (unreachable) insert point.
2671  void EmitAutoVarDecl(const VarDecl &D);
2672 
2673  class AutoVarEmission {
2674  friend class CodeGenFunction;
2675 
2676  const VarDecl *Variable;
2677 
2678  /// The address of the alloca for languages with explicit address space
2679  /// (e.g. OpenCL) or the alloca cast to a generic pointer for address space
2680  /// agnostic languages (e.g. C++). Invalid if the variable was emitted
2681  /// as a global constant.
2682  Address Addr;
2683 
2684  llvm::Value *NRVOFlag;
2685 
2686  /// True if the variable is a __block variable that is captured by an
2687  /// escaping block.
2688  bool IsEscapingByRef;
2689 
2690  /// True if the variable is of aggregate type and has a constant
2691  /// initializer.
2692  bool IsConstantAggregate;
2693 
2694  /// Non-null if we should use lifetime annotations.
2695  llvm::Value *SizeForLifetimeMarkers;
2696 
2697  /// Address with original alloca instruction. Invalid if the variable was
2698  /// emitted as a global constant.
2699  Address AllocaAddr;
2700 
2701  struct Invalid {};
2702  AutoVarEmission(Invalid)
2703  : Variable(nullptr), Addr(Address::invalid()),
2704  AllocaAddr(Address::invalid()) {}
2705 
2706  AutoVarEmission(const VarDecl &variable)
2707  : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
2708  IsEscapingByRef(false), IsConstantAggregate(false),
2709  SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
2710 
2711  bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
2712 
2713  public:
2714  static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
2715 
2716  bool useLifetimeMarkers() const {
2717  return SizeForLifetimeMarkers != nullptr;
2718  }
2719  llvm::Value *getSizeForLifetimeMarkers() const {
2720  assert(useLifetimeMarkers());
2721  return SizeForLifetimeMarkers;
2722  }
2723 
2724  /// Returns the raw, allocated address, which is not necessarily
2725  /// the address of the object itself. It is cast to the default
2726  /// address space for address space agnostic languages.
2727  Address getAllocatedAddress() const {
2728  return Addr;
2729  }
2730 
2731  /// Returns the address for the original alloca instruction.
2732  Address getOriginalAllocatedAddress() const { return AllocaAddr; }
2733 
2734  /// Returns the address of the object within this declaration.
2735  /// Note that this does not chase the forwarding pointer for
2736  /// __block decls.
2737  Address getObjectAddress(CodeGenFunction &CGF) const {
2738  if (!IsEscapingByRef) return Addr;
2739 
2740  return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
2741  }
2742  };
2743  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
2744  void EmitAutoVarInit(const AutoVarEmission &emission);
2745  void EmitAutoVarCleanups(const AutoVarEmission &emission);
2746  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
2747  QualType::DestructionKind dtorKind);
2748 
2749  /// Emits the alloca and debug information for the size expressions for each
2750  /// dimension of an array. It registers the association of each (1-dimensional)
2751  /// QualType with its size expression's debug node, so that CGDebugInfo can
2752  /// reference this node when creating the DISubrange object to describe the
2753  /// array types.
2754  void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI,
2755  const VarDecl &D,
2756  bool EmitDebugInfo);
2757 
2758  void EmitStaticVarDecl(const VarDecl &D,
2759  llvm::GlobalValue::LinkageTypes Linkage);
2760 
2761  class ParamValue {
2762  llvm::Value *Value;
2763  unsigned Alignment;
2764  ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
2765  public:
2766  static ParamValue forDirect(llvm::Value *value) {
2767  return ParamValue(value, 0);
2768  }
2769  static ParamValue forIndirect(Address addr) {
2770  assert(!addr.getAlignment().isZero());
2771  return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
2772  }
2773 
2774  bool isIndirect() const { return Alignment != 0; }
2775  llvm::Value *getAnyValue() const { return Value; }
2776 
2777  llvm::Value *getDirectValue() const {
2778  assert(!isIndirect());
2779  return Value;
2780  }
2781 
2782  Address getIndirectAddress() const {
2783  assert(isIndirect());
2784  return Address(Value, CharUnits::fromQuantity(Alignment));
2785  }
2786  };
2787 
2788  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
2789  void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
2790 
2791  /// protectFromPeepholes - Protect a value that we're intending to
2792  /// store to the side, but which will probably be used later, from
2793  /// aggressive peepholing optimizations that might delete it.
2794  ///
2795  /// Pass the result to unprotectFromPeepholes to declare that
2796  /// protection is no longer required.
2797  ///
2798  /// There's no particular reason why this shouldn't apply to
2799  /// l-values, it's just that no existing peepholes work on pointers.
2800  PeepholeProtection protectFromPeepholes(RValue rvalue);
2801  void unprotectFromPeepholes(PeepholeProtection protection);
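  // Illustrative sketch (not part of the original header): the two calls are
  // intended to bracket the region in which the value must survive:
  //
  //   PeepholeProtection Protection = protectFromPeepholes(RV);
  //   ... emit intervening code that must not drop RV ...
  //   unprotectFromPeepholes(Protection);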
2802 
2803  void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
2804  SourceLocation Loc,
2805  SourceLocation AssumptionLoc,
2806  llvm::Value *Alignment,
2807  llvm::Value *OffsetValue,
2808  llvm::Value *TheCheck,
2809  llvm::Instruction *Assumption);
2810 
2811  void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
2812  SourceLocation Loc, SourceLocation AssumptionLoc,
2813  llvm::Value *Alignment,
2814  llvm::Value *OffsetValue = nullptr);
2815 
2816  void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
2817  SourceLocation Loc, SourceLocation AssumptionLoc,
2818  unsigned Alignment,
2819  llvm::Value *OffsetValue = nullptr);
2820 
2821  void EmitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
2822  SourceLocation AssumptionLoc, unsigned Alignment,
2823  llvm::Value *OffsetValue = nullptr);
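  // Illustrative sketch (not part of the original header): after emitting a
  // call whose result is known to be aligned (e.g. through assume_aligned),
  // a caller records that fact for the optimizer; 'CallResult' and the
  // constant 16 are placeholders:
  //
  //   EmitAlignmentAssumption(CallResult, E, E->getExprLoc(), /*Alignment=*/16);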
2824 
2825  //===--------------------------------------------------------------------===//
2826  // Statement Emission
2827  //===--------------------------------------------------------------------===//
2828 
2829  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
2830  void EmitStopPoint(const Stmt *S);
2831 
2832  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
2833  /// this function even if there is no current insertion point.
2834  ///
2835  /// This function may clear the current insertion point; callers should use
2836  /// EnsureInsertPoint if they wish to subsequently generate code without first
2837  /// calling EmitBlock, EmitBranch, or EmitStmt.
2838  void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = None);
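  // Illustrative sketch (not part of the original header): because EmitStmt may
  // clear the insertion point (e.g. after a return or goto), callers that keep
  // emitting code afterwards typically follow it with EnsureInsertPoint():
  //
  //   EmitStmt(S);
  //   EnsureInsertPoint();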
2839 
2840  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
2841  /// necessarily require an insertion point or debug information; typically
2842  /// because the statement amounts to a jump or a container of other
2843  /// statements.
2844  ///
2845  /// \return True if the statement was handled.
2846  bool EmitSimpleStmt(const Stmt *S);
2847 
2848  Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
2849  AggValueSlot AVS = AggValueSlot::ignored());
2850  Address EmitCompoundStmtWithoutScope(const CompoundStmt &S,
2851  bool GetLast = false,
2852  AggValueSlot AVS =
2853  AggValueSlot::ignored());
2854 
2855  /// EmitLabel - Emit the block for the given label. It is legal to call this
2856  /// function even if there is no current insertion point.
2857  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
2858 
2859  void EmitLabelStmt(const LabelStmt &S);
2860  void EmitAttributedStmt(const AttributedStmt &S);
2861  void EmitGotoStmt(const GotoStmt &S);
2862  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
2863  void EmitIfStmt(const IfStmt &S);
2864 
2865  void EmitWhileStmt(const WhileStmt &S,
2866  ArrayRef<const Attr *> Attrs = None);
2867  void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
2868  void EmitForStmt(const ForStmt &S,
2869  ArrayRef<const Attr *> Attrs = None);
2870  void EmitReturnStmt(const ReturnStmt &S);
2871  void EmitDeclStmt(const DeclStmt &S);
2872  void EmitBreakStmt(const BreakStmt &S);
2873  void EmitContinueStmt(const ContinueStmt &S);
2874  void EmitSwitchStmt(const SwitchStmt &S);
2875  void EmitDefaultStmt(const DefaultStmt &S);
2876  void EmitCaseStmt(const CaseStmt &S);
2877  void EmitCaseStmtRange(const CaseStmt &S);
2878  void EmitAsmStmt(const AsmStmt &S);
2879 
2880  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
2881  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
2882  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
2883  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
2884  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
2885 
2886  void EmitCoroutineBody(const CoroutineBodyStmt &S);
2887  void EmitCoreturnStmt(const CoreturnStmt &S);
2888  RValue EmitCoawaitExpr(const CoawaitExpr &E,
2889  AggValueSlot aggSlot = AggValueSlot::ignored(),
2890  bool ignoreResult = false);
2891  LValue EmitCoawaitLValue(const CoawaitExpr *E);
2892  RValue EmitCoyieldExpr(const CoyieldExpr &E,
2893  AggValueSlot aggSlot = AggValueSlot::ignored(),
2894  bool ignoreResult = false);
2895  LValue EmitCoyieldLValue(const CoyieldExpr *E);
2896  RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
2897 
2898  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2899  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2900 
2901  void EmitCXXTryStmt(const CXXTryStmt &S);
2902  void EmitSEHTryStmt(const SEHTryStmt &S);
2903  void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
2904  void EnterSEHTryStmt(const SEHTryStmt &S);
2905  void ExitSEHTryStmt(const SEHTryStmt &S);
2906 
2907  void pushSEHCleanup(CleanupKind kind,
2908  llvm::Function *FinallyFunc);
2909  void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
2910  const Stmt *OutlinedStmt);
2911 
2912  llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
2913  const SEHExceptStmt &Except);
2914 
2915  llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
2916  const SEHFinallyStmt &Finally);
2917 
2918  void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
2919  llvm::Value *ParentFP,
2920  llvm::Value *EntryEBP);
2921  llvm::Value *EmitSEHExceptionCode();
2922  llvm::Value *EmitSEHExceptionInfo();
2923  llvm::Value *EmitSEHAbnormalTermination();
2924 
2925  /// Emit simple code for OpenMP directives in Simd-only mode.
2926  void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
2927 
2928  /// Scan the outlined statement for captures from the parent function. For
2929  /// each capture, mark the capture as escaped and emit a call to
2930  /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
2931  void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
2932  bool IsFilter);
2933 
2934  /// Recovers the address of a local in a parent function. ParentVar is the
2935  /// address of the variable used in the immediate parent function. It can
2936  /// either be an alloca or a call to llvm.localrecover if there are nested
2937  /// outlined functions. ParentFP is the frame pointer of the outermost parent
2938  /// frame.
2939  Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
2940  Address ParentVar,
2941  llvm::Value *ParentFP);
2942 
2943  void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
2944  ArrayRef<const Attr *> Attrs = None);
2945 
2946  /// Controls insertion of cancellation exit blocks in worksharing constructs.
2947  class OMPCancelStackRAII {
2948  CodeGenFunction &CGF;
2949 
2950  public:
2951  OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
2952  bool HasCancel)
2953  : CGF(CGF) {
2954  CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
2955  }
2956  ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
2957  };
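 // Illustrative sketch of how a worksharing emitter might scope the cancel
 // stack with this RAII object (assumed usage; `CGF`, `S`, and the OMPD_for
 // kind are placeholders for the example):
 //
 //   CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, OMPD_for,
 //                                                    S.hasCancel());
 //   // ... emit the construct body; the destructor exits the cancel region.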
2958 
2959  /// Returns calculated size of the specified type.
2960  llvm::Value *getTypeSize(QualType Ty);
2961  LValue InitCapturedStruct(const CapturedStmt &S);
2962  llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
2963  llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
2964  Address GenerateCapturedStmtArgument(const CapturedStmt &S);
2965  llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S);
2966  void GenerateOpenMPCapturedVars(const CapturedStmt &S,
2967  SmallVectorImpl<llvm::Value *> &CapturedVars);
2968  void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
2969  SourceLocation Loc);
2970  /// Perform element by element copying of arrays with type \a
2971  /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
2972  /// generated by \a CopyGen.
2973  ///
2974  /// \param DestAddr Address of the destination array.
2975  /// \param SrcAddr Address of the source array.
2976  /// \param OriginalType Type of destination and source arrays.
2977  /// \param CopyGen Copying procedure that copies value of single array element
2978  /// to another single array element.
2979  void EmitOMPAggregateAssign(
2980  Address DestAddr, Address SrcAddr, QualType OriginalType,
2981  const llvm::function_ref<void(Address, Address)> CopyGen);
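 // Illustrative sketch (assumed caller-side usage; `CGF`, `DestAddr`,
 // `SrcAddr`, `ElemTy`, and `Loc` are placeholders): the element-wise copy is
 // driven entirely by the CopyGen callback, which receives destination and
 // source element addresses.
 //
 //   CGF.EmitOMPAggregateAssign(
 //       DestAddr, SrcAddr, ElemTy,
 //       [&CGF, ElemTy, Loc](Address Dest, Address Src) {
 //         // Copy a single scalar element.
 //         LValue DstLV = CGF.MakeAddrLValue(Dest, ElemTy);
 //         LValue SrcLV = CGF.MakeAddrLValue(Src, ElemTy);
 //         CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(SrcLV, Loc), DstLV);
 //       });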
2982  /// Emit proper copying of data from one variable to another.
2983  ///
2984  /// \param OriginalType Original type of the copied variables.
2985  /// \param DestAddr Destination address.
2986  /// \param SrcAddr Source address.
2987  /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
2988  /// type of the base array element).
2989  /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
2990  /// the base array element).
2991  /// \param Copy Actual copy expression for copying data from \a SrcVD to \a
2992  /// DestVD.
2993  void EmitOMPCopy(QualType OriginalType,
2994  Address DestAddr, Address SrcAddr,
2995  const VarDecl *DestVD, const VarDecl *SrcVD,
2996  const Expr *Copy);
2997  /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
2998  /// \a X = \a E \a BO \a X.
2999  ///
3000  /// \param X Value to be updated.
3001  /// \param E Update value.
3002  /// \param BO Binary operation for update operation.
3003  /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3004  /// expression, false otherwise.
3005  /// \param AO Atomic ordering of the generated atomic instructions.
3006  /// \param CommonGen Code generator for complex expressions that cannot be
3007  /// expressed through atomicrmw instruction.
3008  /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
3009  /// generated, <false, RValue::get(nullptr)> otherwise.
3010  std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3011  LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3012  llvm::AtomicOrdering AO, SourceLocation Loc,
3013  const llvm::function_ref<RValue(RValue)> CommonGen);
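 // Illustrative sketch (assumed; `CGF`, `XLValue`, `ERValue`, and `Loc` are
 // placeholders): for an update of the form `x = x + expr`, the caller passes
 // BO_Add with IsXLHSInRHSPart=true, plus a CommonGen callback used when the
 // update cannot be lowered to a single atomicrmw.
 //
 //   std::pair<bool, RValue> Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
 //       XLValue, ERValue, BO_Add, /*IsXLHSInRHSPart=*/true,
 //       llvm::AtomicOrdering::Monotonic, Loc,
 //       [](RValue OldX) -> RValue {
 //         // Recompute the new value from the previously loaded one; a
 //         // pass-through body stands in for the real expression here.
 //         return OldX;
 //       });
 //   // Res.first is true when a plain atomicrmw was emitted.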
3014  bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
3015  OMPPrivateScope &PrivateScope);
3016  void EmitOMPPrivateClause(const OMPExecutableDirective &D,
3017  OMPPrivateScope &PrivateScope);
3018  void EmitOMPUseDevicePtrClause(
3019  const OMPClause &C, OMPPrivateScope &PrivateScope,
3020  const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
3021  /// Emit code for copyin clause in \a D directive. The following code is
3022  /// generated at the start of outlined functions for directives:
3023  /// \code
3024  /// threadprivate_var1 = master_threadprivate_var1;
3025  /// operator=(threadprivate_var2, master_threadprivate_var2);
3026  /// ...
3027  /// __kmpc_barrier(&loc, global_tid);
3028  /// \endcode
3029  ///
3030  /// \param D OpenMP directive possibly with 'copyin' clause(s).
3031  /// \returns true if at least one copyin variable is found, false otherwise.
3032  bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
3033  /// Emit initial code for lastprivate variables. If some variable is
3034  /// not also firstprivate, then the default initialization is used. Otherwise
3035  /// initialization of this variable is performed by EmitOMPFirstprivateClause
3036  /// method.
3037  ///
3038  /// \param D Directive that may have 'lastprivate' clauses.
3039  /// \param PrivateScope Private scope for capturing lastprivate variables for
3040  /// proper codegen in internal captured statement.
3041  ///
3042  /// \returns true if there is at least one lastprivate variable, false
3043  /// otherwise.
3044  bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
3045  OMPPrivateScope &PrivateScope);
3046  /// Emit final copying of lastprivate values to original variables at
3047  /// the end of the worksharing or simd directive.
3048  ///
3049  /// \param D Directive that has at least one 'lastprivate' clause.
3050  /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3051  /// it is the last iteration of the loop code in associated directive, or to
3052  /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3053  void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
3054  bool NoFinals,
3055  llvm::Value *IsLastIterCond = nullptr);
3056  /// Emit initial code for linear clauses.
3057  void EmitOMPLinearClause(const OMPLoopDirective &D,
3058  CodeGenFunction::OMPPrivateScope &PrivateScope);
3059  /// Emit final code for linear clauses.
3060  /// \param CondGen Optional conditional code for final part of codegen for
3061  /// linear clause.
3062  void EmitOMPLinearClauseFinal(
3063  const OMPLoopDirective &D,
3064  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3065  /// Emit initial code for reduction variables. Creates reduction copies
3066  /// and initializes them with the values according to OpenMP standard.
3067  ///
3068  /// \param D Directive (possibly) with the 'reduction' clause.
3069  /// \param PrivateScope Private scope for capturing reduction variables for
3070  /// proper codegen in internal captured statement.
3071  ///
3072  void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
3073  OMPPrivateScope &PrivateScope);
3074  /// Emit final update of reduction values to original variables at
3075  /// the end of the directive.
3076  ///
3077  /// \param D Directive that has at least one 'reduction' clause.
3078  /// \param ReductionKind The kind of reduction to perform.
3079  void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
3080  const OpenMPDirectiveKind ReductionKind);
3081  /// Emit initial code for linear variables. Creates private copies
3082  /// and initializes them with the values according to OpenMP standard.
3083  ///
3084  /// \param D Directive (possibly) with the 'linear' clause.
3085  /// \return true if at least one linear variable is found that should be
3086  /// initialized with the value of the original variable, false otherwise.
3087  bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
3088 
3089  typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3090  llvm::Function * /*OutlinedFn*/,
3091  const OMPTaskDataTy & /*Data*/)>
3092  TaskGenTy;
3093  void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
3094  const OpenMPDirectiveKind CapturedRegion,
3095  const RegionCodeGenTy &BodyGen,
3096  const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3097  struct OMPTargetDataInfo {
3098  Address BasePointersArray = Address::invalid();
3099  Address PointersArray = Address::invalid();
3100  Address SizesArray = Address::invalid();
3101  unsigned NumberOfTargetItems = 0;
3102  explicit OMPTargetDataInfo() = default;
3103  OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
3104  Address SizesArray, unsigned NumberOfTargetItems)
3105  : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
3106  SizesArray(SizesArray), NumberOfTargetItems(NumberOfTargetItems) {}
3107  };
3108  void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
3109  const RegionCodeGenTy &BodyGen,
3110  OMPTargetDataInfo &InputInfo);
3111 
3112  void EmitOMPParallelDirective(const OMPParallelDirective &S);
3113  void EmitOMPSimdDirective(const OMPSimdDirective &S);
3114  void EmitOMPForDirective(const OMPForDirective &S);
3115  void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
3116  void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
3117  void EmitOMPSectionDirective(const OMPSectionDirective &S);
3118  void EmitOMPSingleDirective(const OMPSingleDirective &S);
3119  void EmitOMPMasterDirective(const OMPMasterDirective &S);
3120  void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
3121  void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
3122  void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
3123  void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
3124  void EmitOMPTaskDirective(const OMPTaskDirective &S);
3125  void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
3126  void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
3127  void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
3128  void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
3129  void EmitOMPFlushDirective(const OMPFlushDirective &S);
3130  void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
3131  void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
3132  void EmitOMPTargetDirective(const OMPTargetDirective &S);
3133  void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
3134  void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S);
3135  void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S);
3136  void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S);
3137  void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S);
3138  void
3139  EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S);
3140  void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
3141  void
3142  EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
3143  void EmitOMPCancelDirective(const OMPCancelDirective &S);
3144  void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S);
3145  void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
3146  void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
3147  void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
3148  void EmitOMPDistributeParallelForDirective(
3149  const OMPDistributeParallelForDirective &S);
3150  void EmitOMPDistributeParallelForSimdDirective(
3151  const OMPDistributeParallelForSimdDirective &S);
3152  void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
3153  void EmitOMPTargetParallelForSimdDirective(
3154  const OMPTargetParallelForSimdDirective &S);
3155  void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
3156  void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
3157  void
3158  EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
3159  void EmitOMPTeamsDistributeParallelForSimdDirective(
3160  const OMPTeamsDistributeParallelForSimdDirective &S);
3161  void EmitOMPTeamsDistributeParallelForDirective(
3162  const OMPTeamsDistributeParallelForDirective &S);
3163  void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
3164  void EmitOMPTargetTeamsDistributeDirective(
3165  const OMPTargetTeamsDistributeDirective &S);
3166  void EmitOMPTargetTeamsDistributeParallelForDirective(
3167  const OMPTargetTeamsDistributeParallelForDirective &S);
3168  void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
3169  const OMPTargetTeamsDistributeParallelForSimdDirective &S);
3170  void EmitOMPTargetTeamsDistributeSimdDirective(
3171  const OMPTargetTeamsDistributeSimdDirective &S);
3172 
3173  /// Emit device code for the target directive.
3174  static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
3175  StringRef ParentName,
3176  const OMPTargetDirective &S);
3177  static void
3178  EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3179  const OMPTargetParallelDirective &S);
3180  /// Emit device code for the target parallel for directive.
3181  static void EmitOMPTargetParallelForDeviceFunction(
3182  CodeGenModule &CGM, StringRef ParentName,
3183  const OMPTargetParallelForDirective &S);
3184  /// Emit device code for the target parallel for simd directive.
3185  static void EmitOMPTargetParallelForSimdDeviceFunction(
3186  CodeGenModule &CGM, StringRef ParentName,
3187  const OMPTargetParallelForSimdDirective &S);
3188  /// Emit device code for the target teams directive.
3189  static void
3190  EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
3191  const OMPTargetTeamsDirective &S);
3192  /// Emit device code for the target teams distribute directive.
3193  static void EmitOMPTargetTeamsDistributeDeviceFunction(
3194  CodeGenModule &CGM, StringRef ParentName,
3195  const OMPTargetTeamsDistributeDirective &S);
3196  /// Emit device code for the target teams distribute simd directive.
3197  static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
3198  CodeGenModule &CGM, StringRef ParentName,
3199  const OMPTargetTeamsDistributeSimdDirective &S);
3200  /// Emit device code for the target simd directive.
3201  static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
3202  StringRef ParentName,
3203  const OMPTargetSimdDirective &S);
3204  /// Emit device code for the target teams distribute parallel for simd
3205  /// directive.
3206  static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
3207  CodeGenModule &CGM, StringRef ParentName,
3208  const OMPTargetTeamsDistributeParallelForSimdDirective &S);
3209 
3210  static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
3211  CodeGenModule &CGM, StringRef ParentName,
3212  const OMPTargetTeamsDistributeParallelForDirective &S);
3213  /// Emit inner loop of the worksharing/simd construct.
3214  ///
3215  /// \param S Directive, for which the inner loop must be emitted.
3216  /// \param RequiresCleanup true, if directive has some associated private
3217  /// variables.
3218  /// \param LoopCond Boolean condition for loop continuation.
3219  /// \param IncExpr Increment expression for loop control variable.
3220  /// \param BodyGen Generator for the inner body of the inner loop.
3221  /// \param PostIncGen Generator for post-increment code (required for ordered
3222  /// loop directives).
3223  void EmitOMPInnerLoop(
3224  const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
3225  const Expr *IncExpr,
3226  const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
3227  const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
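 // Illustrative sketch (assumed; `S` stands for an OMPLoopDirective and
 // `LoopCond`/`IncExpr` for its sema-built helper expressions):
 //
 //   CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, LoopCond, IncExpr,
 //                        [&S](CodeGenFunction &CGF) {
 //                          CGF.EmitStmt(S.getBody()); // one iteration body
 //                        },
 //                        [](CodeGenFunction &) {
 //                          // no extra post-increment code in this sketch
 //                        });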
3228 
3229  JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
3230  /// Emit initial code for loop counters of loop-based directives.
3231  void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
3232  OMPPrivateScope &LoopScope);
3233 
3234  /// Helper for the OpenMP loop directives.
3235  void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
3236 
3237  /// Emit code for the worksharing loop-based directive.
3238  /// \return true, if this construct has any lastprivate clause, false -
3239  /// otherwise.
3240  bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
3241  const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3242  const CodeGenDispatchBoundsTy &CGDispatchBounds);
3243 
3244  /// Emit code for the distribute loop-based directive.
3245  void EmitOMPDistributeLoop(const OMPLoopDirective &S,
3246  const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
3247 
3248  /// Helpers for the OpenMP loop directives.
3249  void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
3250  void EmitOMPSimdFinal(
3251  const OMPLoopDirective &D,
3252  const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3253 
3254  /// Emits the lvalue for the expression with possibly captured variable.
3255  LValue EmitOMPSharedLValue(const Expr *E);
3256 
3257 private:
3258  /// Helpers for blocks.
3259  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
3260 
3261  /// struct with the values to be passed to the OpenMP loop-related functions
3262  struct OMPLoopArguments {
3263  /// loop lower bound
3264  Address LB = Address::invalid();
3265  /// loop upper bound
3266  Address UB = Address::invalid();
3267  /// loop stride
3268  Address ST = Address::invalid();
3269  /// isLastIteration argument for runtime functions
3270  Address IL = Address::invalid();
3271  /// Chunk value generated by sema
3272  llvm::Value *Chunk = nullptr;
3273  /// EnsureUpperBound
3274  Expr *EUB = nullptr;
3275  /// IncrementExpression
3276  Expr *IncExpr = nullptr;
3277  /// Loop initialization
3278  Expr *Init = nullptr;
3279  /// Loop exit condition
3280  Expr *Cond = nullptr;
3281  /// Update of LB after a whole chunk has been executed
3282  Expr *NextLB = nullptr;
3283  /// Update of UB after a whole chunk has been executed
3284  Expr *NextUB = nullptr;
3285  OMPLoopArguments() = default;
3286  OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
3287  llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
3288  Expr *IncExpr = nullptr, Expr *Init = nullptr,
3289  Expr *Cond = nullptr, Expr *NextLB = nullptr,
3290  Expr *NextUB = nullptr)
3291  : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
3292  IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
3293  NextUB(NextUB) {}
3294  };
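 // Illustrative sketch (assumed): the outer-loop emitters below receive the
 // runtime-visible loop state bundled in this struct; `LB`, `UB`, `ST`, `IL`,
 // and `Chunk` are placeholder values produced by earlier codegen.
 //
 //   OMPLoopArguments LoopArgs(LB, UB, ST, IL, Chunk);
 //   LoopArgs.IncExpr = IncrementExpr; // optional expressions built by sema
 //   LoopArgs.Cond = LoopExitCond;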
3295  void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
3296  const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
3297  const OMPLoopArguments &LoopArgs,
3298  const CodeGenLoopTy &CodeGenLoop,
3299  const CodeGenOrderedTy &CodeGenOrdered);
3300  void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
3301  bool IsMonotonic, const OMPLoopDirective &S,
3302  OMPPrivateScope &LoopScope, bool Ordered,
3303  const OMPLoopArguments &LoopArgs,
3304  const CodeGenDispatchBoundsTy &CGDispatchBounds);
3305  void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
3306  const OMPLoopDirective &S,
3307  OMPPrivateScope &LoopScope,
3308  const OMPLoopArguments &LoopArgs,
3309  const CodeGenLoopTy &CodeGenLoopContent);
3310  /// Emit code for sections directive.
3311  void EmitSections(const OMPExecutableDirective &S);
3312 
3313 public:
3314 
3315  //===--------------------------------------------------------------------===//
3316  // LValue Expression Emission
3317  //===--------------------------------------------------------------------===//
3318 
3319  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
3320  RValue GetUndefRValue(QualType Ty);
3321 
3322  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
3323  /// and issue an ErrorUnsupported style diagnostic (using the
3324  /// provided Name).
3325  RValue EmitUnsupportedRValue(const Expr *E,
3326  const char *Name);
3327 
3328  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
3329  /// an ErrorUnsupported style diagnostic (using the provided Name).
3330  LValue EmitUnsupportedLValue(const Expr *E,
3331  const char *Name);
3332 
3333  /// EmitLValue - Emit code to compute a designator that specifies the location
3334  /// of the expression.
3335  ///
3336  /// This can return one of two things: a simple address or a bitfield
3337  /// reference. In either case, the LLVM Value* in the LValue structure is
3338  /// guaranteed to be an LLVM pointer type.
3339  ///
3340  /// If this returns a bitfield reference, nothing about the pointee type of
3341  /// the LLVM value is known: For example, it may not be a pointer to an
3342  /// integer.
3343  ///
3344  /// If this returns a normal address, and if the lvalue's C type is fixed
3345  /// size, this method guarantees that the returned pointer type will point to
3346  /// an LLVM type of the same size as the lvalue's type. If the lvalue has a
3347  /// variable length type, this is not possible.
3348  ///
3349  LValue EmitLValue(const Expr *E);
3350 
3351  /// Same as EmitLValue but additionally we generate checking code to
3352  /// guard against undefined behavior. This is only suitable when we know
3353  /// that the address will be used to access the object.
3354  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
3355 
3356  RValue convertTempToRValue(Address addr, QualType type,
3357  SourceLocation Loc);
3358 
3359  void EmitAtomicInit(Expr *E, LValue lvalue);
3360 
3361  bool LValueIsSuitableForInlineAtomic(LValue Src);
3362 
3363  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
3364  AggValueSlot Slot = AggValueSlot::ignored());
3365 
3366  RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
3367  llvm::AtomicOrdering AO, bool IsVolatile = false,
3368  AggValueSlot slot = AggValueSlot::ignored());
3369 
3370  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
3371 
3372  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
3373  bool IsVolatile, bool isInit);
3374 
3375  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
3376  LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
3377  llvm::AtomicOrdering Success =
3378  llvm::AtomicOrdering::SequentiallyConsistent,
3379  llvm::AtomicOrdering Failure =
3380  llvm::AtomicOrdering::SequentiallyConsistent,
3381  bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
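 // Illustrative sketch (assumed; `CGF`, `Obj`, `Expected`, `Desired`, and
 // `Loc` are placeholders): a sequentially consistent, strong compare-exchange
 // on an atomic l-value.
 //
 //   std::pair<RValue, llvm::Value *> Res =
 //       CGF.EmitAtomicCompareExchange(Obj, Expected, Desired, Loc);
 //   RValue Old = Res.first;              // value previously stored in Obj
 //   llvm::Value *Succeeded = Res.second; // i1 flag of the exchange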
3382 
3383  void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
3384  const llvm::function_ref<RValue(RValue)> &UpdateOp,
3385  bool IsVolatile);
3386 
3387  /// EmitToMemory - Change a scalar value from its value
3388  /// representation to its in-memory representation.
3389  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
3390 
3391  /// EmitFromMemory - Change a scalar value from its memory
3392  /// representation to its value representation.
3393  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
3394 
3395  /// Check if the scalar \p Value is within the valid range for the given
3396  /// type \p Ty.
3397  ///
3398  /// Returns true if a check is needed (even if the range is unknown).
3399  bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
3400  SourceLocation Loc);
3401 
3402  /// EmitLoadOfScalar - Load a scalar value from an address, taking
3403  /// care to appropriately convert from the memory representation to
3404  /// the LLVM value representation.
3405  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
3406  SourceLocation Loc,
3407  AlignmentSource Source = AlignmentSource::Type,
3408  bool isNontemporal = false) {
3409  return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
3410  CGM.getTBAAAccessInfo(Ty), isNontemporal);
3411  }
3412 
3413  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
3414  SourceLocation Loc, LValueBaseInfo BaseInfo,
3415  TBAAAccessInfo TBAAInfo,
3416  bool isNontemporal = false);
3417 
3418  /// EmitLoadOfScalar - Load a scalar value from an address, taking
3419  /// care to appropriately convert from the memory representation to
3420  /// the LLVM value representation. The l-value must be a simple
3421  /// l-value.
3422  llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);
3423 
3424  /// EmitStoreOfScalar - Store a scalar value to an address, taking
3425  /// care to appropriately convert from the memory representation to
3426  /// the LLVM value representation.
3427  void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
3428  bool Volatile, QualType Ty,
3429  AlignmentSource Source = AlignmentSource::Type,
3430  bool isInit = false, bool isNontemporal = false) {
3431  EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
3432  CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
3433  }
3434 
3435  void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
3436  bool Volatile, QualType Ty,
3437  LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
3438  bool isInit = false, bool isNontemporal = false);
3439 
3440  /// EmitStoreOfScalar - Store a scalar value to an address, taking
3441  /// care to appropriately convert from the memory representation to
3442  /// the LLVM value representation. The l-value must be a simple
3443  /// l-value. The isInit flag indicates whether this is an initialization.
3444  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
3445  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
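 // Illustrative sketch (assumed; `CGF`, `Addr`, `Ty`, and `Loc` are
 // placeholders): a round trip through the scalar load/store helpers, which
 // handle the memory<->value representation conversion.
 //
 //   LValue LV = CGF.MakeAddrLValue(Addr, Ty);
 //   llvm::Value *V = CGF.EmitLoadOfScalar(LV, Loc); // memory -> value repr
 //   CGF.EmitStoreOfScalar(V, LV);                   // value -> memory repr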
3446 
3447  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
3448  /// this method emits the address of the lvalue, then loads the result as an
3449  /// rvalue, returning the rvalue.
3450  RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
3451  RValue EmitLoadOfExtVectorElementLValue(LValue V);
3452  RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
3453  RValue EmitLoadOfGlobalRegLValue(LValue LV);
3454 
3455  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
3456  /// lvalue, where both are guaranteed to have the same type, and that type
3457  /// is 'Ty'.
3458  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
3459  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
3460  void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
3461 
3462  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
3463  /// as EmitStoreThroughLValue.
3464  ///
3465  /// \param Result [out] - If non-null, this will be set to a Value* for the
3466  /// bit-field contents after the store, appropriate for use as the result of
3467  /// an assignment to the bit-field.
3468  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
3469  llvm::Value **Result=nullptr);
3470 
3471  /// Emit an l-value for an assignment (simple or compound) of complex type.
3472  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
3473  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
3474  LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
3475  llvm::Value *&Result);
3476 
3477  // Note: only available for agg return types
3478  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
3479  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
3480  // Note: only available for agg return types
3481  LValue EmitCallExprLValue(const CallExpr *E);
3482  // Note: only available for agg return types
3483  LValue EmitVAArgExprLValue(const VAArgExpr *E);
3484  LValue EmitDeclRefLValue(const DeclRefExpr *E);
3485  LValue EmitStringLiteralLValue(const StringLiteral *E);
3486  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
3487  LValue EmitPredefinedLValue(const PredefinedExpr *E);
3488  LValue EmitUnaryOpLValue(const UnaryOperator *E);
3489  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3490  bool Accessed = false);
3491  LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
3492  bool IsLowerBound = true);
3493  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
3494  LValue EmitMemberExpr(const MemberExpr *E);
3495  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
3496  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
3497  LValue EmitInitListLValue(const InitListExpr *E);
3498  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
3499  LValue EmitCastLValue(const CastExpr *E);
3500  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
3501  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
3502 
3503  Address EmitExtVectorElementLValue(LValue V);
3504 
3505  RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
3506 
3507  Address EmitArrayToPointerDecay(const Expr *Array,
3508  LValueBaseInfo *BaseInfo = nullptr,
3509  TBAAAccessInfo *TBAAInfo = nullptr);
3510 
3511  class ConstantEmission {
3512  llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
3513  ConstantEmission(llvm::Constant *C, bool isReference)
3514  : ValueAndIsReference(C, isReference) {}
3515  public:
3516  ConstantEmission() {}
3517  static ConstantEmission forReference(llvm::Constant *C) {
3518  return ConstantEmission(C, true);
3519  }
3520  static ConstantEmission forValue(llvm::Constant *C) {
3521  return ConstantEmission(C, false);
3522  }
3523 
3524  explicit operator bool() const {
3525  return ValueAndIsReference.getOpaqueValue() != nullptr;
3526  }
3527 
3528  bool isReference() const { return ValueAndIsReference.getInt(); }
3529  LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
3530  assert(isReference());
3531  return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
3532  refExpr->getType());
3533  }
3534 
3535  llvm::Constant *getValue() const {
3536  assert(!isReference());
3537  return ValueAndIsReference.getPointer();
3538  }
3539  };
3540 
3541  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
3542  ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
3543  llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
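 // Illustrative sketch (assumed, based on the declarations above; `CGF` and
 // `DRE` are placeholders): fold a DeclRefExpr to a constant before falling
 // back to an ordinary l-value load.
 //
 //   if (CodeGenFunction::ConstantEmission CE = CGF.tryEmitAsConstant(DRE)) {
 //     if (CE.isReference())
 //       return CE.getReferenceLValue(CGF, DRE);
 //     llvm::Value *V = CGF.emitScalarConstant(CE, DRE);
 //     // ... use V directly instead of emitting a load.
 //   }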
3544 
3545  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
3546  AggValueSlot slot = AggValueSlot::ignored());
3547  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
3548 
3549  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
3550  const ObjCIvarDecl *Ivar);
3551  LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
3552  LValue EmitLValueForLambdaField(const FieldDecl *Field);
3553 
3554  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
3555  /// if the Field is a reference, this will return the address of the reference
3556  /// and not the address of the value stored in the reference.
3557  LValue EmitLValueForFieldInitialization(LValue Base,
3558  const FieldDecl* Field);
3559 
3560  LValue EmitLValueForIvar(QualType ObjectTy,
3561  llvm::Value* Base, const ObjCIvarDecl *Ivar,
3562  unsigned CVRQualifiers);
3563 
3564  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
3565  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
3566  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
3567  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
3568 
3569  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
3570  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
3571  LValue EmitStmtExprLValue(const StmtExpr *E);
3572  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
3573  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
3574  void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
3575 
3576  //===--------------------------------------------------------------------===//
3577  // Scalar Expression Emission
3578  //===--------------------------------------------------------------------===//
3579 
3580  /// EmitCall - Generate a call of the given function, expecting the given
3581  /// result type, and using the given argument list which specifies both the
3582  /// LLVM arguments and the types they were derived from.
3583  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
3584  ReturnValueSlot ReturnValue, const CallArgList &Args,
3585  llvm::CallBase **callOrInvoke, SourceLocation Loc);
3586  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
3587  ReturnValueSlot ReturnValue, const CallArgList &Args,
3588  llvm::CallBase **callOrInvoke = nullptr) {
3589  return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
3590  SourceLocation());
3591  }
3592  RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
3593  ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr);
3594  RValue EmitCallExpr(const CallExpr *E,
3595  ReturnValueSlot ReturnValue = ReturnValueSlot());
3596  RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
3597  CGCallee EmitCallee(const Expr *E);
3598 
3599  void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
3600 
3601  llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
3602  const Twine &name = "");
3603  llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
3604  ArrayRef<llvm::Value *> args,
3605  const Twine &name = "");
3606  llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3607  const Twine &name = "");
3608  llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3609  ArrayRef<llvm::Value *> args,
3610  const Twine &name = "");
3611 
3612  SmallVector<llvm::OperandBundleDef, 1>
3613  getBundlesForFunclet(llvm::Value *Callee);
3614 
3615  llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
3616  ArrayRef<llvm::Value *> Args,
3617  const Twine &Name = "");
3618  llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3619  ArrayRef<llvm::Value *> args,
3620  const Twine &name = "");
3621  llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3622  const Twine &name = "");
3623  void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3624  ArrayRef<llvm::Value *> args);
3625 
3626  CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
3627  NestedNameSpecifier *Qual,
3628  llvm::Type *Ty);
3629 
3630  CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
3631  CXXDtorType Type,
3632  const CXXRecordDecl *RD);
3633 
3634  // Return the copy constructor name with the prefix "__copy_constructor_"
3635  // removed.
3636  static std::string getNonTrivialCopyConstructorStr(QualType QT,
3637  CharUnits Alignment,
3638  bool IsVolatile,
3639  ASTContext &Ctx);
3640 
3641  // Return the destructor name with the prefix "__destructor_" removed.
3642  static std::string getNonTrivialDestructorStr(QualType QT,
3643  CharUnits Alignment,
3644  bool IsVolatile,
3645  ASTContext &Ctx);
3646 
3647  // These functions emit calls to the special functions of non-trivial C
3648  // structs.
3649  void defaultInitNonTrivialCStructVar(LValue Dst);
3650  void callCStructDefaultConstructor(LValue Dst);
3651  void callCStructDestructor(LValue Dst);
3652  void callCStructCopyConstructor(LValue Dst, LValue Src);
3653  void callCStructMoveConstructor(LValue Dst, LValue Src);
3654  void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
3655  void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
3656 
3657  RValue
3658  EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method,
3659  const CGCallee &Callee,
3660  ReturnValueSlot ReturnValue, llvm::Value *This,
3661  llvm::Value *ImplicitParam,
3662  QualType ImplicitParamTy, const CallExpr *E,
3663  CallArgList *RtlArgs);
3664  RValue EmitCXXDestructorCall(GlobalDecl Dtor,
3665  const CGCallee &Callee,
3666  llvm::Value *This, llvm::Value *ImplicitParam,
3667  QualType ImplicitParamTy, const CallExpr *E);
3668  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
3669  ReturnValueSlot ReturnValue);
3670  RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE,
3671  const CXXMethodDecl *MD,
3672  ReturnValueSlot ReturnValue,
3673  bool HasQualifier,
3674  NestedNameSpecifier *Qualifier,
3675  bool IsArrow, const Expr *Base);
3676  // Compute the object pointer.
3677  Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
3678  llvm::Value *memberPtr,
3679  const MemberPointerType *memberPtrType,
3680  LValueBaseInfo *BaseInfo = nullptr,
3681  TBAAAccessInfo *TBAAInfo = nullptr);
3682  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
3683  ReturnValueSlot ReturnValue);
3684 
3685  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
3686  const CXXMethodDecl *MD,
3687  ReturnValueSlot ReturnValue);
3688  RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);
3689 
3690  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
3691  ReturnValueSlot ReturnValue);
3692 
3693  RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
3694  ReturnValueSlot ReturnValue);
3695 
3696  RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
3697  const CallExpr *E, ReturnValueSlot ReturnValue);
3698 
3699  RValue emitRotate(const CallExpr *E, bool IsRotateRight);
3700 
3701  /// Emit IR for __builtin_os_log_format.
3702  RValue emitBuiltinOSLogFormat(const CallExpr &E);
3703 
3704  llvm::Function *generateBuiltinOSLogHelperFunction(
3705  const analyze_os_log::OSLogBufferLayout &Layout,
3706  CharUnits BufferAlignment);
3707 
3708  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
3709 
3710  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
3711  /// is unhandled by the current target.
3712  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3713 
3714  llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
3715  const llvm::CmpInst::Predicate Fp,
3716  const llvm::CmpInst::Predicate Ip,
3717  const llvm::Twine &Name = "");
3718  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
3719  llvm::Triple::ArchType Arch);
3720 
3721  llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
3722  unsigned LLVMIntrinsic,
3723  unsigned AltLLVMIntrinsic,
3724  const char *NameHint,
3725  unsigned Modifier,
3726  const CallExpr *E,
3727  SmallVectorImpl<llvm::Value *> &Ops,
3728  Address PtrOp0, Address PtrOp1,
3729  llvm::Triple::ArchType Arch);
3730 
3731  llvm::Value *EmitISOVolatileLoad(const CallExpr *E);
3732  llvm::Value *EmitISOVolatileStore(const CallExpr *E);
3733 
3734  llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
3735  unsigned Modifier, llvm::Type *ArgTy,
3736  const CallExpr *E);
3737  llvm::Value *EmitNeonCall(llvm::Function *F,
3738  SmallVectorImpl<llvm::Value*> &O,
3739  const char *name,
3740  unsigned shift = 0, bool rightshift = false);
3741  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
3742  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
3743  bool negateForRightShift);
3744  llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
3745  llvm::Type *Ty, bool usgn, const char *name);
3746  llvm::Value *vectorWrapScalar16(llvm::Value *Op);
3747  llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
3748  llvm::Triple::ArchType Arch);
3749 
3750  llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
3751  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3752  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3753  llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3754  llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3755  llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3756  llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
3757  const CallExpr *E);
3758  llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
3759 
3760 private:
3761  enum class MSVCIntrin;
3762 
3763 public:
3764  llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
3765 
3766  llvm::Value *EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args);
3767 
3768  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
3769  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
3770  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
3771  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
3772  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
3773  llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
3774  const ObjCMethodDecl *MethodWithObjects);
3775  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
3776  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
3777  ReturnValueSlot Return = ReturnValueSlot());
3778 
3779  /// Retrieves the default cleanup kind for an ARC cleanup.
3780  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
3781  CleanupKind getARCCleanupKind() {
3782  return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
3783  ? NormalAndEHCleanup : NormalCleanup;
3784  }
3785 
3786  // ARC primitives.
3787  void EmitARCInitWeak(Address addr, llvm::Value *value);
3788  void EmitARCDestroyWeak(Address addr);
3789  llvm::Value *EmitARCLoadWeak(Address addr);
3790  llvm::Value *EmitARCLoadWeakRetained(Address addr);
3791  llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
3792  void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
3793  void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
3794  void EmitARCCopyWeak(Address dst, Address src);
3795  void EmitARCMoveWeak(Address dst, Address src);
3796  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
3797  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
3798  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
3799  bool resultIgnored);
3800  llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
3801  bool resultIgnored);
3802  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
3803  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
3804  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
3805  void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
3806  void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
3807  llvm::Value *EmitARCAutorelease(llvm::Value *value);
3808  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
3809  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
3810  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
3811  llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
3812 
3813  llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
3814  llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
3815  llvm::Type *returnType);
3816  void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
3817 
3818  std::pair<LValue,llvm::Value*>
3819  EmitARCStoreAutoreleasing(const BinaryOperator *e);
3820  std::pair<LValue,llvm::Value*>
3821  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
3822  std::pair<LValue,llvm::Value*>
3823  EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);
3824 
3825  llvm::Value *EmitObjCAlloc(llvm::Value *value,
3826  llvm::Type *returnType);
3827  llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
3828  llvm::Type *returnType);
3829  llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);
3830 
3831  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
3832  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
3833  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
3834 
3835  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
3836  llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
3837  bool allowUnsafeClaim);
3838  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
3839  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
3840  llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr);
3841 
3842  void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
3843 
3844  static Destroyer destroyARCStrongImprecise;
3845  static Destroyer destroyARCStrongPrecise;
3846  static Destroyer destroyARCWeak;
3847  static Destroyer emitARCIntrinsicUse;
3848  static Destroyer destroyNonTrivialCStruct;
3849 
3850  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
3851  llvm::Value *EmitObjCAutoreleasePoolPush();
3852  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
3853  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
3854  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
3855 
3856  /// Emits a reference binding to the passed in expression.
3857  RValue EmitReferenceBindingToExpr(const Expr *E);
3858 
3859  //===--------------------------------------------------------------------===//
3860  // Expression Emission
3861  //===--------------------------------------------------------------------===//
3862 
3863  // Expressions are broken into three classes: scalar, complex, aggregate.
3864 
3865  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
3866  /// scalar type, returning the result.
3867  llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
3868 
3869  /// Emit a conversion from the specified type to the specified destination
3870  /// type, both of which are LLVM scalar types.
3871  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
3872  QualType DstTy, SourceLocation Loc);
3873 
3874  /// Emit a conversion from the specified complex type to the specified
3875  /// destination type, where the destination type is an LLVM scalar type.
3876  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
3877  QualType DstTy,
3878  SourceLocation Loc);
3879 
3880  /// EmitAggExpr - Emit the computation of the specified expression
3881  /// of aggregate type. The result is computed into the given slot,
3882  /// which may be null to indicate that the value is not needed.
3883  void EmitAggExpr(const Expr *E, AggValueSlot AS);
3884 
3885  /// EmitAggExprToLValue - Emit the computation of the specified expression of
3886  /// aggregate type into a temporary LValue.
3887  LValue EmitAggExprToLValue(const Expr *E);
3888 
3889  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
3890  /// make sure it survives garbage collection until this point.
3891  void EmitExtendGCLifetime(llvm::Value *object);
3892 
3893  /// EmitComplexExpr - Emit the computation of the specified expression of
3894  /// complex type, returning the result.
3895  ComplexPairTy EmitComplexExpr(const Expr *E,
3896  bool IgnoreReal = false,
3897  bool IgnoreImag = false);
3898 
3899  /// EmitComplexExprIntoLValue - Emit the given expression of complex
3900  /// type and place its result into the specified l-value.
3901  void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
3902 
3903  /// EmitStoreOfComplex - Store a complex number into the specified l-value.
3904  void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
3905 
3906  /// EmitLoadOfComplex - Load a complex number from the specified l-value.
3907  ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
3908 
3909  Address emitAddrOfRealComponent(Address complex, QualType complexType);
3910  Address emitAddrOfImagComponent(Address complex, QualType complexType);
3911 
3912  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
3913  /// global variable that has already been created for it. If the initializer
3914  /// has a different type than GV does, this may free GV and return a different
3915  /// one. Otherwise it just returns GV.
3916  llvm::GlobalVariable *
3917  AddInitializerToStaticVarDecl(const VarDecl &D,
3918  llvm::GlobalVariable *GV);
3919 
3920  // Emit an @llvm.invariant.start call for the given memory region.
3921  void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
3922 
3923  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
3924  /// variable with global storage.
3925  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
3926  bool PerformInit);
3927 
3928  llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
3929  llvm::Constant *Addr);
3930 
3931  /// Call atexit() with a function that passes the given argument to
3932  /// the given function.
3933  void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
3934  llvm::Constant *addr);
3935 
3936  /// Call atexit() with function dtorStub.
3937  void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
3938 
3939  /// Emit code in this function to perform a guarded variable
3940  /// initialization. Guarded initializations are used when it's not
3941  /// possible to prove that an initialization will be done exactly
3942  /// once, e.g. with a static local variable or a static data member
3943  /// of a class template.
3944  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
3945  bool PerformInit);
3946 
3947  enum class GuardKind { VariableGuard, TlsGuard };
3948 
3949  /// Emit a branch to select whether or not to perform guarded initialization.
3950  void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
3951  llvm::BasicBlock *InitBlock,
3952  llvm::BasicBlock *NoInitBlock,
3953  GuardKind Kind, const VarDecl *D);
3954 
3955  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
3956  /// variables.
3957  void
3958  GenerateCXXGlobalInitFunc(llvm::Function *Fn,
3959  ArrayRef<llvm::Function *> CXXThreadLocals,
3960  ConstantAddress Guard = ConstantAddress::invalid());
3961 
3962  /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
3963  /// variables.
3964  void GenerateCXXGlobalDtorsFunc(
3965  llvm::Function *Fn,
3966  const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
3967  llvm::Constant *>> &DtorsAndObjects);
3968 
3969  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
3970  const VarDecl *D,
3971  llvm::GlobalVariable *Addr,
3972  bool PerformInit);
3973 
3974  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
3975 
3976  void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
3977 
3978  void enterFullExpression(const FullExpr *E) {
3979  if (const auto *EWC = dyn_cast<ExprWithCleanups>(E))
3980  if (EWC->getNumObjects() == 0)
3981  return;
3982  enterNonTrivialFullExpression(E);
3983  }
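 // Illustrative sketch (assumed usage pattern; `CGF` and `E` stand for an
 // ExprWithCleanups being emitted for its side effects): enter the full
 // expression before opening the cleanup scope so its cleanups are active.
 //
 //   CGF.enterFullExpression(E);
 //   CodeGenFunction::RunCleanupsScope Scope(CGF);
 //   CGF.EmitIgnoredExpr(E->getSubExpr());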
3984  void enterNonTrivialFullExpression(const FullExpr *E);
3985 
3986  void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
3987 
3988  RValue EmitAtomicExpr(AtomicExpr *E);
3989 
3990  //===--------------------------------------------------------------------===//
3991  // Annotations Emission
3992  //===--------------------------------------------------------------------===//
3993 
3994  /// Emit an annotation call (intrinsic).
3995  llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
3996  llvm::Value *AnnotatedVal,
3997  StringRef AnnotationStr,
3998  SourceLocation Location);
3999 
4000  /// Emit local annotations for the local variable V, declared by D.
4001  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
4002 
4003  /// Emit field annotations for the given field & value. Returns the
4004  /// annotation result.
4005  Address EmitFieldAnnotations(const FieldDecl *D, Address V);
4006 
4007  //===--------------------------------------------------------------------===//
4008  // Internal Helpers
4009  //===--------------------------------------------------------------------===//
4010 
4011  /// ContainsLabel - Return true if the statement contains a label in it. If
4012  /// this statement is not executed normally, the absence of a label means
4013  /// that we can just remove the code.
4014  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
4015 
4016  /// containsBreak - Return true if the statement contains a break out of it.
4017  /// If the statement (recursively) contains a switch or loop with a break
4018  /// inside of it, this is fine.
4019  static bool containsBreak(const Stmt *S);
4020 
4021  /// Determine if the given statement might introduce a declaration into the
4022  /// current scope, by being a (possibly-labelled) DeclStmt.
4023  static bool mightAddDeclToScope(const Stmt *S);
4024 
4025  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
4026  /// to a constant, or if it does but contains a label, return false. If it
4027  /// constant folds return true and set the boolean result in Result.
4028  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result,
4029  bool AllowLabels = false);
4030 
4031  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
4032  /// to a constant, or if it does but contains a label, return false. If it
4033  /// constant folds return true and set the folded value.
4034  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
4035  bool AllowLabels = false);
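 // Illustrative sketch (assumed; `CGF` and `Cond` are placeholders): the
 // boolean overload is typically used to avoid emitting a dead branch.
 //
 //   bool CondConstant;
 //   if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
 //     // Only the arm selected by CondConstant needs to be emitted.
 //   }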
4036 
4037  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
4038  /// if statement) to the specified blocks. Based on the condition, this might
4039  /// try to simplify the codegen of the conditional based on the branch.
4040  /// TrueCount should be the number of times we expect the condition to
4041  /// evaluate to true based on PGO data.
4042  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
4043  llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
4044 
4045  /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
4046  /// nonnull, if \p LHS is marked _Nonnull.
4047  void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
4048 
4049  /// An enumeration which makes it easier to specify whether or not an
4050  /// operation is a subtraction.
4051  enum { NotSubtraction = false, IsSubtraction = true };
4052 
4053  /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
4054  /// detect undefined behavior when the pointer overflow sanitizer is enabled.
4055  /// \p SignedIndices indicates whether any of the GEP indices are signed.
4056  /// \p IsSubtraction indicates whether the expression used to form the GEP
4057  /// is a subtraction.
4058  llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
4059  ArrayRef<llvm::Value *> IdxList,
4060  bool SignedIndices,
4061  bool IsSubtraction,
4062  SourceLocation Loc,
4063  const Twine &Name = "");
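 // Illustrative sketch (assumed; `CGF`, `Ptr`, `Idx`, and `Loc` are
 // placeholders): emitting `p + i` with the pointer-overflow check, treating
 // the index as signed.
 //
 //   llvm::Value *Elt = CGF.EmitCheckedInBoundsGEP(
 //       Ptr, Idx, /*SignedIndices=*/true, CodeGenFunction::NotSubtraction,
 //       Loc, "add.ptr");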
4064 
4065  /// Specifies which type of sanitizer check to apply when handling a
4066  /// particular builtin.
4067  enum BuiltinCheckKind {
4068  BCK_CTZPassedZero,
4069  BCK_CLZPassedZero,
4070  };
4071 
4072  /// Emits an argument for a call to a builtin. If the builtin sanitizer is
4073  /// enabled, a runtime check specified by \p Kind is also emitted.
4074  llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
4075 
4076  /// Emit a description of a type in a format suitable for passing to
4077  /// a runtime sanitizer handler.
4078  llvm::Constant *EmitCheckTypeDescriptor(QualType T);
4079 
4080  /// Convert a value into a format suitable for passing to a runtime
4081  /// sanitizer handler.
4082  llvm::Value *EmitCheckValue(llvm::Value *V);
4083 
4084  /// Emit a description of a source location in a format suitable for
4085  /// passing to a runtime sanitizer handler.
4086  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
4087 
4088  /// Create a basic block that will either trap or call a handler function in
4089  /// the UBSan runtime with the provided arguments, and create a conditional
4090  /// branch to it.
4091  void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
4092  SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
4093  ArrayRef<llvm::Value *> DynamicArgs);
4094 
4095  /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
4096  /// if Cond is false.
4097  void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond,
4098  llvm::ConstantInt *TypeId, llvm::Value *Ptr,
4099  ArrayRef<llvm::Constant *> StaticArgs);
4100 
4101  /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
4102  /// checking is enabled. Otherwise, just emit an unreachable instruction.
4103  void EmitUnreachable(SourceLocation Loc);
4104 
4105  /// Create a basic block that will call the trap intrinsic, and emit a
4106  /// conditional branch to it, for the -ftrapv checks.
4107  void EmitTrapCheck(llvm::Value *Checked);
4108 
4109  /// Emit a call to trap or debugtrap and attach function attribute
4110  /// "trap-func-name" if specified.
4111  llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
4112 
4113  /// Emit a stub for the cross-DSO CFI check function.
4114  void EmitCfiCheckStub();
4115 
4116  /// Emit a cross-DSO CFI failure handling function.
4117  void EmitCfiCheckFail();
4118 
4119  /// Create a check for a function parameter that may potentially be
4120  /// declared as non-null.
4121  void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
4122  AbstractCallee AC, unsigned ParmNum);
4123 
4124  /// EmitCallArg - Emit a single call argument.
4125  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
4126 
4127  /// EmitDelegateCallArg - We are performing a delegate call; that
4128  /// is, the current function is delegating to another one. Produce
4129  /// an r-value suitable for passing the given parameter.
4130  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
4131  SourceLocation loc);
4132 
4133  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
4134  /// point operation, expressed as the maximum relative error in ulp.
4135  void SetFPAccuracy(llvm::Value *Val, float Accuracy);
4136 
4137 private:
4138  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
4139  void EmitReturnOfRValue(RValue RV, QualType Ty);
4140 
4141  void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
4142 
4143  llvm::SmallVector<std::pair<llvm::WeakTrackingVH, llvm::Value *>, 4>
4144      DeferredReplacements;
4145 
4146  /// Set the address of a local variable.
4147  void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
4148  assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
4149  LocalDeclMap.insert({VD, Addr});
4150  }
4151 
4152  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
4153  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
4154  ///
4155  /// \param AI - The first function argument of the expansion.
4156  void ExpandTypeFromArgs(QualType Ty, LValue Dst,
4157  SmallVectorImpl<llvm::Value *>::iterator &AI);
4158 
4159  /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
4160  /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
4161  /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
4162  void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
4163  SmallVectorImpl<llvm::Value *> &IRCallArgs,
4164  unsigned &IRCallArgPos);
4165 
4166  llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
4167  const Expr *InputExpr, std::string &ConstraintStr);
4168 
4169  llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
4170  LValue InputValue, QualType InputType,
4171  std::string &ConstraintStr,
4172  SourceLocation Loc);
4173 
4174  /// Attempts to statically evaluate the object size of E. If that
4175  /// fails, emits code to figure the size of E out for us. This is
4176  /// pass_object_size aware.
4177  ///
4178  /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
4179  llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
4180  llvm::IntegerType *ResType,
4181  llvm::Value *EmittedE,
4182  bool IsDynamic);
4183 
4184  /// Emits the size of E, as required by __builtin_object_size. This
4185  /// function is aware of pass_object_size parameters, and will act accordingly
4186  /// if E is a parameter with the pass_object_size attribute.
4187  llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
4188  llvm::IntegerType *ResType,
4189  llvm::Value *EmittedE,
4190  bool IsDynamic);
4191 
4192 public:
4193 #ifndef NDEBUG
4194  // Determine whether the given argument is an Objective-C method
4195  // that may have type parameters in its signature.
4196  static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4197  const DeclContext *dc = method->getDeclContext();
4198  if (const ObjCInterfaceDecl *classDecl= dyn_cast<ObjCInterfaceDecl>(dc)) {
4199  return classDecl->getTypeParamListAsWritten();
4200  }
4201 
4202  if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4203  return catDecl->getTypeParamList();
4204  }
4205 
4206  return false;
4207  }
4208 
4209  template<typename T>
4210  static bool isObjCMethodWithTypeParams(const T *) { return false; }
4211 #endif
4212 
4213  enum class EvaluationOrder {
4214  ///! No language constraints on evaluation order.
4215  Default,
4216  ///! Language semantics require left-to-right evaluation.
4217  ForceLeftToRight,
4218  ///! Language semantics require right-to-left evaluation.
4219  ForceRightToLeft
4220  };
4221 
4222  /// EmitCallArgs - Emit call arguments for a function.
4223  template <typename T>
4224  void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
4225  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4226  AbstractCallee AC = AbstractCallee(),
4227  unsigned ParamsToSkip = 0,
4228  EvaluationOrder Order = EvaluationOrder::Default) {
4229  SmallVector<QualType, 16> ArgTypes;
4230  CallExpr::const_arg_iterator Arg = ArgRange.begin();
4231 
4232  assert((ParamsToSkip == 0 || CallArgTypeInfo) &&
4233  "Can't skip parameters if type info is not provided");
4234  if (CallArgTypeInfo) {
4235 #ifndef NDEBUG
4236  bool isGenericMethod = isObjCMethodWithTypeParams(CallArgTypeInfo);
4237 #endif
4238 
4239  // First, use the argument types that the type info knows about
4240  for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip,
4241  E = CallArgTypeInfo->param_type_end();
4242  I != E; ++I, ++Arg) {
4243  assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4244  assert((isGenericMethod ||
4245  ((*I)->isVariablyModifiedType() ||
4246  (*I).getNonReferenceType()->isObjCRetainableType() ||
4247  getContext()
4248  .getCanonicalType((*I).getNonReferenceType())
4249  .getTypePtr() ==
4250  getContext()
4251  .getCanonicalType((*Arg)->getType())
4252  .getTypePtr())) &&
4253  "type mismatch in call argument!");
4254  ArgTypes.push_back(*I);
4255  }
4256  }
4257 
4258  // Either we've emitted all the call args, or we have a call to a variadic
4259  // function.
4260  assert((Arg == ArgRange.end() || !CallArgTypeInfo ||
4261  CallArgTypeInfo->isVariadic()) &&
4262  "Extra arguments in non-variadic function!");
4263 
4264  // If we still have any arguments, emit them using the type of the argument.
4265  for (auto *A : llvm::make_range(Arg, ArgRange.end()))
4266  ArgTypes.push_back(CallArgTypeInfo ? getVarArgType(A) : A->getType());
4267 
4268  EmitCallArgs(Args, ArgTypes, ArgRange, AC, ParamsToSkip, Order);
4269  }
4270 
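// [Editor's sketch -- not part of the original header] A hypothetical caller
// holding a FunctionProtoType `FPT` and a CallExpr `CE` would drive the
// template above roughly as follows; every name here is illustrative.
//
//   CallArgList Args;
//   EmitCallArgs(Args, FPT, CE->arguments(), CE->getDirectCallee(),
//                /*ParamsToSkip=*/0, EvaluationOrder::Default);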
4271  void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
4272  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4273  AbstractCallee AC = AbstractCallee(),
4274  unsigned ParamsToSkip = 0,
4275  EvaluationOrder Order = EvaluationOrder::Default);
4276 
4277  /// EmitPointerWithAlignment - Given an expression with a pointer type,
4278  /// emit the value and compute our best estimate of the alignment of the
4279  /// pointee.
4280  ///
4281  /// \param BaseInfo - If non-null, this will be initialized with
4282  /// information about the source of the alignment and the may-alias
4283  /// attribute. Note that this function will conservatively fall back on
4284  /// the type when it doesn't recognize the expression and may-alias will
4285  /// be set to false.
4286  ///
4287  /// One reasonable way to use this information is when there's a language
4288  /// guarantee that the pointer must be aligned to some stricter value, and
4289  /// we're simply trying to ensure that sufficiently obvious uses of under-
4290  /// aligned objects don't get miscompiled; for example, a placement new
4291  /// into the address of a local variable. In such a case, it's quite
4292  /// reasonable to just ignore the returned alignment when it isn't from an
4293  /// explicit source.
4294  Address EmitPointerWithAlignment(const Expr *Addr,
4295  LValueBaseInfo *BaseInfo = nullptr,
4296  TBAAAccessInfo *TBAAInfo = nullptr);
4297 
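// [Editor's sketch -- not part of the original header] Typical use of
// EmitPointerWithAlignment when forming an l-value for the pointee (`E` is a
// hypothetical pointer-typed expression); the BaseInfo/TBAAInfo out-parameters
// are optional and may be omitted when only the address matters.
//
//   LValueBaseInfo BaseInfo;
//   TBAAAccessInfo TBAAInfo;
//   Address Addr = EmitPointerWithAlignment(E, &BaseInfo, &TBAAInfo);
//   LValue LV = MakeAddrLValue(Addr, E->getType()->getPointeeType(),
//                              BaseInfo, TBAAInfo);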
4298  /// If \p E references a parameter with pass_object_size info or a constant
4299  /// array size modifier, emit the object size divided by the size of \p EltTy.
4300  /// Otherwise return null.
4301  llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);
4302 
4303  void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
4304 
4305  struct MultiVersionResolverOption {
4306  llvm::Function *Function;
4308  struct Conds {
4309  StringRef Architecture;
4310  llvm::SmallVector<StringRef, 8> Features;
4311 
4312  Conds(StringRef Arch, ArrayRef<StringRef> Feats)
4313  : Architecture(Arch), Features(Feats.begin(), Feats.end()) {}
4314  } Conditions;
4315 
4316  MultiVersionResolverOption(llvm::Function *F, StringRef Arch,
4317  ArrayRef<StringRef> Feats)
4318  : Function(F), Conditions(Arch, Feats) {}
4319  };
4320 
4321  // Emits the body of a multiversion function's resolver. Assumes that the
4322  // options are already sorted in the proper order, with the 'default' option
4323  // last (if it exists).
4324  void EmitMultiVersionResolver(llvm::Function *Resolver,
4325  ArrayRef<MultiVersionResolverOption> Options);
4326 
4327  static uint64_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
4328 
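// [Editor's sketch -- not part of the original header] Building resolver
// options and emitting the resolver body might look roughly like this; the
// functions and feature strings are hypothetical, and the options must already
// be sorted with the 'default' variant last, as noted above.
//
//   StringRef AVX2Feats[] = {"avx2"};
//   SmallVector<MultiVersionResolverOption, 2> Options;
//   Options.emplace_back(AVX2Fn, /*Arch=*/"", AVX2Feats);
//   Options.emplace_back(DefaultFn, /*Arch=*/"", ArrayRef<StringRef>());
//   EmitMultiVersionResolver(Resolver, Options);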
4329 private:
4330  QualType getVarArgType(const Expr *Arg);
4331 
4332  void EmitDeclMetadata();
4333 
4334  BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
4335  const AutoVarEmission &emission);
4336 
4337  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
4338 
4339  llvm::Value *GetValueForARMHint(unsigned BuiltinID);
4340  llvm::Value *EmitX86CpuIs(const CallExpr *E);
4341  llvm::Value *EmitX86CpuIs(StringRef CPUStr);
4342  llvm::Value *EmitX86CpuSupports(const CallExpr *E);
4343  llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
4344  llvm::Value *EmitX86CpuSupports(uint64_t Mask);
4345  llvm::Value *EmitX86CpuInit();
4346  llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
4347 };
4348 
4349 inline DominatingLLVMValue::saved_type
4350 DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
4351  if (!needsSaving(value)) return saved_type(value, false);
4352 
4353  // Otherwise, we need an alloca.
4354  auto align = CharUnits::fromQuantity(
4355  CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
4356  Address alloca =
4357  CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
4358  CGF.Builder.CreateStore(value, alloca);
4359 
4360  return saved_type(alloca.getPointer(), true);
4361 }
4362 
4363 inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
4364  saved_type value) {
4365  // If the value says it wasn't saved, trust that it's still dominating.
4366  if (!value.getInt()) return value.getPointer();
4367 
4368  // Otherwise, it should be an alloca instruction, as set up in save().
4369  auto alloca = cast<llvm::AllocaInst>(value.getPointer());
4370  return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
4371 }
4372 
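// [Editor's sketch -- not part of the original header] The save()/restore()
// pair above is what lets a cleanup refer to a value whose computation does
// not dominate the point where the cleanup is emitted: in the conditional
// block one would write
//
//   DominatingLLVMValue::saved_type Saved =
//       DominatingLLVMValue::save(CGF, V);   // may spill V to an alloca
//
// and at the later use site
//
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);
//
// where CGF and V are hypothetical stand-ins for the active CodeGenFunction
// and the value being preserved.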
4373 } // end namespace CodeGen
4374 } // end namespace clang
4375 
4376 #endif