clang 20.0.0git
CodeGenFunction.h
Go to the documentation of this file.
1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the internal per-function state used for llvm translation.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15
16#include "CGBuilder.h"
17#include "CGDebugInfo.h"
18#include "CGLoopInfo.h"
19#include "CGValue.h"
20#include "CodeGenModule.h"
21#include "CodeGenPGO.h"
22#include "EHScopeStack.h"
23#include "VarBypassDetector.h"
24#include "clang/AST/CharUnits.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
31#include "clang/AST/Type.h"
32#include "clang/Basic/ABI.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
39#include "llvm/ADT/MapVector.h"
40#include "llvm/ADT/SmallVector.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/ValueHandle.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Transforms/Utils/SanitizerStats.h"
46#include <optional>
47
48namespace llvm {
49class BasicBlock;
50class LLVMContext;
51class MDNode;
52class SwitchInst;
53class Twine;
54class Value;
55class CanonicalLoopInfo;
56}
57
58namespace clang {
59class ASTContext;
60class CXXDestructorDecl;
61class CXXForRangeStmt;
62class CXXTryStmt;
63class Decl;
64class LabelDecl;
65class FunctionDecl;
66class FunctionProtoType;
67class LabelStmt;
68class ObjCContainerDecl;
69class ObjCInterfaceDecl;
70class ObjCIvarDecl;
71class ObjCMethodDecl;
72class ObjCImplementationDecl;
73class ObjCPropertyImplDecl;
74class TargetInfo;
75class VarDecl;
76class ObjCForCollectionStmt;
77class ObjCAtTryStmt;
78class ObjCAtThrowStmt;
79class ObjCAtSynchronizedStmt;
80class ObjCAutoreleasePoolStmt;
81class OMPUseDevicePtrClause;
82class OMPUseDeviceAddrClause;
83class SVETypeFlags;
84class OMPExecutableDirective;
85
86namespace analyze_os_log {
87class OSLogBufferLayout;
88}
89
90namespace CodeGen {
91class CodeGenTypes;
92class CGCallee;
93class CGFunctionInfo;
94class CGBlockInfo;
95class CGCXXABI;
96class BlockByrefHelpers;
97class BlockByrefInfo;
98class BlockFieldFlags;
99class RegionCodeGenTy;
100class TargetCodeGenInfo;
101struct OMPTaskDataTy;
102struct CGCoroData;
103
104/// The kind of evaluation to perform on values of a particular
105/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
106/// CGExprAgg?
107///
108/// TODO: should vectors maybe be split out into their own thing?
114
115#define LIST_SANITIZER_CHECKS \
116 SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
117 SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
118 SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
119 SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
120 SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
121 SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
122 SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
123 SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
124 SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
125 SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
126 SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
127 SANITIZER_CHECK(MissingReturn, missing_return, 0) \
128 SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
129 SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
130 SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
131 SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
132 SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
133 SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
134 SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
135 SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
136 SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
137 SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
138 SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
139 SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
140 SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0) \
141 SANITIZER_CHECK(BoundsSafety, bounds_safety, 0)
142
144#define SANITIZER_CHECK(Enum, Name, Version) Enum,
146#undef SANITIZER_CHECK
148
149/// Helper class with most of the code for saving a value for a
150/// conditional expression cleanup.
152 typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
153
154 /// Answer whether the given value needs extra work to be saved.
155 static bool needsSaving(llvm::Value *value) {
// A null value trivially needs no save.
156 if (!value)
157 return false;
158
159 // If it's not an instruction, we don't need to save.
// (Non-instruction Values — arguments, constants, globals — are
// available at any point in the function.)
160 if (!isa<llvm::Instruction>(value)) return false;
161
162 // If it's an instruction in the entry block, we don't need to save.
// The entry block dominates every other block, so an entry-block
// instruction is guaranteed to be available wherever it is restored.
163 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
164 return (block != &block->getParent()->getEntryBlock());
165 }
166
167 static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
168 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
169};
170
171/// A partial specialization of DominatingValue for llvm::Values that
172/// might be llvm::Instructions.
173template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
174 typedef T *type;
176 return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
177 }
178};
179
180/// A specialization of DominatingValue for Address.
181template <> struct DominatingValue<Address> {
182 typedef Address type;
183
184 struct saved_type {
186 llvm::Type *ElementType;
189 llvm::PointerType *EffectiveType;
190 };
191
192 static bool needsSaving(type value) {
195 return true;
196 return false;
197 }
198 static saved_type save(CodeGenFunction &CGF, type value) {
199 return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
200 value.getElementType(), value.getAlignment(),
201 DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
202 }
204 return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
205 value.ElementType, value.Alignment, CGPointerAuthInfo(),
206 DominatingLLVMValue::restore(CGF, value.Offset));
207 }
208};
209
210/// A specialization of DominatingValue for RValue.
211template <> struct DominatingValue<RValue> {
212 typedef RValue type;
214 enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
215 AggregateAddress, ComplexAddress };
216 union {
217 struct {
219 } Vals;
221 };
222 LLVM_PREFERRED_TYPE(Kind)
223 unsigned K : 3;
224
226 : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}
227
230 : Vals{Val1, Val2}, K(ComplexAddress) {}
231
232 saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
233 : AggregateAddr(AggregateAddr), K(K) {}
234
235 public:
236 static bool needsSaving(RValue value);
239
240 // implementations in CGCleanup.cpp
241 };
242
243 static bool needsSaving(type value) {
244 return saved_type::needsSaving(value);
245 }
246 static saved_type save(CodeGenFunction &CGF, type value) {
247 return saved_type::save(CGF, value);
248 }
250 return value.restore(CGF);
251 }
252};
253
254/// CodeGenFunction - This class organizes the per-function state that is used
255/// while generating LLVM code.
257 CodeGenFunction(const CodeGenFunction &) = delete;
258 void operator=(const CodeGenFunction &) = delete;
259
260 friend class CGCXXABI;
261public:
262 /// A jump destination is an abstract label, branching to which may
263 /// require a jump out through normal cleanups.
264 struct JumpDest {
265 JumpDest() : Block(nullptr), Index(0) {}
266 JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
267 unsigned Index)
268 : Block(Block), ScopeDepth(Depth), Index(Index) {}
269
270 bool isValid() const { return Block != nullptr; }
271 llvm::BasicBlock *getBlock() const { return Block; }
272 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
273 unsigned getDestIndex() const { return Index; }
274
275 // This should be used cautiously.
277 ScopeDepth = depth;
278 }
279
280 private:
281 llvm::BasicBlock *Block;
283 unsigned Index;
284 };
285
286 CodeGenModule &CGM; // Per-module state.
288
289 // For EH/SEH outlined funclets, this field points to parent's CGF
291
292 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
295
296 // Stores variables for which we can't generate correct lifetime markers
297 // because of jumps.
299
300 /// List of recently emitted OMPCanonicalLoops.
301 ///
302 /// Since OMPCanonicalLoops are nested inside other statements (in particular
303 /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
304 /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
305 /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
306 /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
307 /// this stack when done. Entering a new loop requires clearing this list; it
308 /// either means we start parsing a new loop nest (in which case the previous
309 /// loop nest goes out of scope) or a second loop in the same level in which
310 /// case it would be ambiguous into which of the two (or more) loops the loop
311 /// nest would extend.
313
314 /// Stack to track the Logical Operator recursion nest for MC/DC.
316
317 /// Stack to track the controlled convergence tokens.
319
320 /// Number of nested loop to be consumed by the last surrounding
321 /// loop-associated directive.
323
324 // CodeGen lambda for loops and support for ordered clause
325 typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
326 JumpDest)>
328 typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
329 const unsigned, const bool)>
331
332 // Codegen lambda for loop bounds in worksharing loop constructs
333 typedef llvm::function_ref<std::pair<LValue, LValue>(
336
337 // Codegen lambda for loop bounds in dispatch-based loop implementation
338 typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
340 Address UB)>
342
343 /// CGBuilder insert helper. This function is called after an
344 /// instruction is created using Builder.
345 void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
346 llvm::BasicBlock::iterator InsertPt) const;
347
348 /// CurFuncDecl - Holds the Decl for the current outermost
349 /// non-closure context.
350 const Decl *CurFuncDecl = nullptr;
351 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
352 const Decl *CurCodeDecl = nullptr;
353 const CGFunctionInfo *CurFnInfo = nullptr;
355 llvm::Function *CurFn = nullptr;
356
357 /// Save Parameter Decl for coroutine.
359
360 // Holds coroutine data if the current function is a coroutine. We use a
361 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
362 // in this header.
363 struct CGCoroInfo {
// Opaque per-coroutine state. CGCoroData is only forward-declared in
// this header, so the ctor/dtor are declared here and defined out of
// line where the type is complete (required for ~unique_ptr).
364 std::unique_ptr<CGCoroData> Data;
// Set while emitting code inside a coroutine suspend block.
// NOTE(review): exact set/reset points live outside this header —
// confirm against the coroutine codegen implementation.
365 bool InSuspendBlock = false;
366 CGCoroInfo();
367 ~CGCoroInfo();
368 };
370
371 bool isCoroutine() const {
// CurCoro.Data is allocated only when the current function being
// emitted is a coroutine (see CGCoroInfo above).
372 return CurCoro.Data != nullptr;
373 }
374
375 bool inSuspendBlock() const {
377 }
378
379 // Holds FramePtr for await_suspend wrapper generation,
380 // so that __builtin_coro_frame call can be lowered
381 // directly to value of its second argument
383 llvm::Value *FramePtr = nullptr;
384 };
386
387 // Generates wrapper function for `llvm.coro.await.suspend.*` intrinisics.
388 // It encapsulates SuspendExpr in a function, to separate it's body
389 // from the main coroutine to avoid miscompilations. Intrinisic
390 // is lowered to this function call in CoroSplit pass
391 // Function signature is:
392 // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
393 // where type is one of (void, i1, ptr)
394 llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
395 Twine const &SuspendPointName,
396 CoroutineSuspendExpr const &S);
397
398 /// CurGD - The GlobalDecl for the current function being compiled.
400
401 /// PrologueCleanupDepth - The cleanup depth enclosing all the
402 /// cleanups associated with the parameters.
404
405 /// ReturnBlock - Unified return block.
407
408 /// ReturnValue - The temporary alloca to hold the return
409 /// value. This is invalid iff the function has no return value.
411
412 /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
413 /// This is invalid if sret is not in use.
415
416 /// If a return statement is being visited, this holds the return statment's
417 /// result expression.
418 const Expr *RetExpr = nullptr;
419
420 /// Return true if a label was seen in the current scope.
422 if (CurLexicalScope)
423 return CurLexicalScope->hasLabels();
424 return !LabelMap.empty();
425 }
426
427 /// AllocaInsertPoint - This is an instruction in the entry block before which
428 /// we prefer to insert allocas.
429 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
430
431private:
432 /// PostAllocaInsertPt - This is a place in the prologue where code can be
433 /// inserted that will be dominated by all the static allocas. This helps
434 /// achieve two things:
435 /// 1. Contiguity of all static allocas (within the prologue) is maintained.
436 /// 2. All other prologue code (which are dominated by static allocas) do
437 /// appear in the source order immediately after all static allocas.
438 ///
439 /// PostAllocaInsertPt will be lazily created when it is *really* required.
440 llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;
441
442public:
443 /// Return PostAllocaInsertPt. If it is not yet created, then insert it
444 /// immediately after AllocaInsertPt.
445 llvm::Instruction *getPostAllocaInsertPoint() {
// Created lazily on first use, as documented on PostAllocaInsertPt.
446 if (!PostAllocaInsertPt) {
447 assert(AllocaInsertPt &&
448 "Expected static alloca insertion point at function prologue");
449 assert(AllocaInsertPt->getParent()->isEntryBlock() &&
450 "EBB should be entry block of the current code gen function");
// Clone the existing marker so the new insertion point is the same
// kind of placeholder instruction, then place it immediately after
// AllocaInsertPt: code inserted before PostAllocaInsertPt is thus
// dominated by all static allocas.
451 PostAllocaInsertPt = AllocaInsertPt->clone();
452 PostAllocaInsertPt->setName("postallocapt");
453 PostAllocaInsertPt->insertAfter(AllocaInsertPt);
454 }
455
456 return PostAllocaInsertPt;
457 }
458
459 /// API for captured statement code generation.
461 public:
463 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
466 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
467
469 S.getCapturedRecordDecl()->field_begin();
470 for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
471 E = S.capture_end();
472 I != E; ++I, ++Field) {
473 if (I->capturesThis())
474 CXXThisFieldDecl = *Field;
475 else if (I->capturesVariable())
476 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
477 else if (I->capturesVariableByCopy())
478 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
479 }
480 }
481
482 virtual ~CGCapturedStmtInfo();
483
484 CapturedRegionKind getKind() const { return Kind; }
485
486 virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
487 // Retrieve the value of the context parameter.
488 virtual llvm::Value *getContextValue() const { return ThisValue; }
489
490 /// Lookup the captured field decl for a variable.
491 virtual const FieldDecl *lookup(const VarDecl *VD) const {
// Canonicalize first so any redeclaration of VD maps to the same
// capture field; DenseMap::lookup yields nullptr if VD is not captured.
492 return CaptureFields.lookup(VD->getCanonicalDecl());
493 }
494
495 bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
496 virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
497
498 static bool classof(const CGCapturedStmtInfo *) {
// Root of the CGCapturedStmtInfo hierarchy for LLVM-style RTTI:
// every instance trivially satisfies isa<CGCapturedStmtInfo>.
499 return true;
500 }
501
502 /// Emit the captured statement body.
503 virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
505 CGF.EmitStmt(S);
506 }
507
508 /// Get the name of the capture helper.
509 virtual StringRef getHelperName() const { return "__captured_stmt"; }
510
511 /// Get the CaptureFields
512 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
// NOTE(review): returns the map by value, copying every entry. If all
// callers only read it, returning a const reference would avoid the
// copy — confirm call sites before changing the signature.
513 return CaptureFields;
514 }
515
516 private:
517 /// The kind of captured statement being generated.
519
520 /// Keep the map between VarDecl and FieldDecl.
521 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
522
523 /// The base address of the captured record, passed in as the first
524 /// argument of the parallel region function.
525 llvm::Value *ThisValue;
526
527 /// Captured 'this' type.
528 FieldDecl *CXXThisFieldDecl;
529 };
531
532 /// RAII for correct setting/restoring of CapturedStmtInfo.
534 private:
535 CodeGenFunction &CGF;
536 CGCapturedStmtInfo *PrevCapturedStmtInfo;
537 public:
539 CGCapturedStmtInfo *NewCapturedStmtInfo)
540 : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
541 CGF.CapturedStmtInfo = NewCapturedStmtInfo;
542 }
543 ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
544 };
545
546 /// An abstract representation of regular/ObjC call/message targets.
548 /// The function declaration of the callee.
549 const Decl *CalleeDecl;
550
551 public:
552 AbstractCallee() : CalleeDecl(nullptr) {}
553 AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
554 AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
555 bool hasFunctionDecl() const {
// True only for a C/C++ FunctionDecl callee; false for an ObjC method
// callee or when no callee decl is known (CalleeDecl may be null).
556 return isa_and_nonnull<FunctionDecl>(CalleeDecl);
557 }
558 const Decl *getDecl() const { return CalleeDecl; }
559 unsigned getNumParams() const {
// Precondition: CalleeDecl is non-null and is either a FunctionDecl
// or an ObjCMethodDecl — the cast<> below asserts otherwise.
560 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
561 return FD->getNumParams();
562 return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
563 }
564 const ParmVarDecl *getParamDecl(unsigned I) const {
// Same precondition as getNumParams(); I must be < getNumParams().
565 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
566 return FD->getParamDecl(I);
// ObjC methods expose parameters via iterators, not indexed access.
567 return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
568 }
569 };
570
571 /// Sanitizers enabled for this function.
573
574 /// True if CodeGen currently emits code implementing sanitizer checks.
575 bool IsSanitizerScope = false;
576
577 /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
579 CodeGenFunction *CGF;
580 public:
583 };
584
585 /// In C++, whether we are code generating a thunk. This controls whether we
586 /// should emit cleanups.
587 bool CurFuncIsThunk = false;
588
589 /// In ARC, whether we should autorelease the return value.
590 bool AutoreleaseResult = false;
591
592 /// Whether we processed a Microsoft-style asm block during CodeGen. These can
593 /// potentially set the return value.
594 bool SawAsmBlock = false;
595
597
598 /// True if the current function is an outlined SEH helper. This can be a
599 /// finally block or filter expression.
601
602 /// True if CodeGen currently emits code inside presereved access index
603 /// region.
605
606 /// True if the current statement has nomerge attribute.
608
609 /// True if the current statement has noinline attribute.
611
612 /// True if the current statement has always_inline attribute.
614
615 /// True if the current statement has noconvergent attribute.
617
618 // The CallExpr within the current statement that the musttail attribute
619 // applies to. nullptr if there is no 'musttail' on the current statement.
620 const CallExpr *MustTailCall = nullptr;
621
622 /// Returns true if a function must make progress, which means the
623 /// mustprogress attribute can be added.
625 if (CGM.getCodeGenOpts().getFiniteLoops() ==
627 return false;
628
629 // C++11 and later guarantees that a thread eventually will do one of the
630 // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
631 // - terminate,
632 // - make a call to a library I/O function,
633 // - perform an access through a volatile glvalue, or
634 // - perform a synchronization operation or an atomic operation.
635 //
636 // Hence each function is 'mustprogress' in C++11 or later.
637 return getLangOpts().CPlusPlus11;
638 }
639
640 /// Returns true if a loop must make progress, which means the mustprogress
641 /// attribute can be added. \p HasConstantCond indicates whether the branch
642 /// condition is a known constant.
643 bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);
644
646 llvm::Value *BlockPointer = nullptr;
647
648 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
650
651 /// A mapping from NRVO variables to the flags used to indicate
652 /// when the NRVO has been applied to this variable.
653 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
654
657
658 // A stack of cleanups which were added to EHStack but have to be deactivated
659 // later before being popped or emitted. These are usually deactivated on
660 // exiting a `CleanupDeactivationScope` scope. For instance, after a
661 // full-expr.
662 //
663 // These are specially useful for correctly emitting cleanups while
664 // encountering branches out of expression (through stmt-expr or coroutine
665 // suspensions).
668 llvm::Instruction *DominatingIP;
669 };
671
672 // Enters a new scope for capturing cleanups which are deferred to be
673 // deactivated, all of which will be deactivated once the scope is exited.
682
684 assert(!Deactivated && "Deactivating already deactivated scope");
686 for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
687 CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
688 Stack[I - 1].DominatingIP);
689 Stack[I - 1].DominatingIP->eraseFromParent();
690 }
691 Stack.resize(OldDeactivateCleanupStackSize);
692 Deactivated = true;
693 }
694
696 if (Deactivated)
697 return;
699 }
700 };
701
703
704 llvm::Instruction *CurrentFuncletPad = nullptr;
705
707 bool isRedundantBeforeReturn() override { return true; }
708
709 llvm::Value *Addr;
710 llvm::Value *Size;
711
712 public:
713 CallLifetimeEnd(RawAddress addr, llvm::Value *size)
714 : Addr(addr.getPointer()), Size(size) {}
715
716 void Emit(CodeGenFunction &CGF, Flags flags) override {
// End the lifetime region for the tracked alloca (Addr/Size were
// captured in the constructor); see CGF.EmitLifetimeEnd.
717 CGF.EmitLifetimeEnd(Size, Addr);
718 }
719 };
720
721 /// Header for data within LifetimeExtendedCleanupStack.
723 /// The size of the following cleanup object.
724 unsigned Size;
725 /// The kind of cleanup to push.
726 LLVM_PREFERRED_TYPE(CleanupKind)
728 /// Whether this is a conditional cleanup.
729 LLVM_PREFERRED_TYPE(bool)
730 unsigned IsConditional : 1;
731
732 size_t getSize() const { return Size; }
733 CleanupKind getKind() const { return (CleanupKind)Kind; }
734 bool isConditional() const { return IsConditional; }
735 };
736
737 /// i32s containing the indexes of the cleanup destinations.
739
741
742 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
743 llvm::BasicBlock *EHResumeBlock = nullptr;
744
745 /// The exception slot. All landing pads write the current exception pointer
746 /// into this alloca.
747 llvm::Value *ExceptionSlot = nullptr;
748
749 /// The selector slot. Under the MandatoryCleanup model, all landing pads
750 /// write the current selector value into this alloca.
751 llvm::AllocaInst *EHSelectorSlot = nullptr;
752
753 /// A stack of exception code slots. Entering an __except block pushes a slot
754 /// on the stack and leaving pops one. The __exception_code() intrinsic loads
755 /// a value from the top of the stack.
757
758 /// Value returned by __exception_info intrinsic.
759 llvm::Value *SEHInfo = nullptr;
760
761 /// Emits a landing pad for the current EH stack.
762 llvm::BasicBlock *EmitLandingPad();
763
764 llvm::BasicBlock *getInvokeDestImpl();
765
766 /// Parent loop-based directive for scan directive.
768 llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
769 llvm::BasicBlock *OMPAfterScanBlock = nullptr;
770 llvm::BasicBlock *OMPScanExitBlock = nullptr;
771 llvm::BasicBlock *OMPScanDispatch = nullptr;
772 bool OMPFirstScanLoop = false;
773
774 /// Manages parent directive for scan directives.
776 CodeGenFunction &CGF;
777 const OMPExecutableDirective *ParentLoopDirectiveForScan;
778
779 public:
781 CodeGenFunction &CGF,
782 const OMPExecutableDirective &ParentLoopDirectiveForScan)
783 : CGF(CGF),
784 ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
785 CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
786 }
788 CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
789 }
790 };
791
792 template <class T>
794 return DominatingValue<T>::save(*this, value);
795 }
796
798 public:
799 CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
802
803 private:
804 void ConstructorHelper(FPOptions FPFeatures);
805 CodeGenFunction &CGF;
806 FPOptions OldFPFeatures;
807 llvm::fp::ExceptionBehavior OldExcept;
808 llvm::RoundingMode OldRounding;
809 std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
810 };
812
813public:
814 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
815 /// rethrows.
817
818 /// A class controlling the emission of a finally block.
820 /// Where the catchall's edge through the cleanup should go.
821 JumpDest RethrowDest;
822
823 /// A function to call to enter the catch.
824 llvm::FunctionCallee BeginCatchFn;
825
826 /// An i1 variable indicating whether or not the @finally is
827 /// running for an exception.
828 llvm::AllocaInst *ForEHVar = nullptr;
829
830 /// An i8* variable into which the exception pointer to rethrow
831 /// has been saved.
832 llvm::AllocaInst *SavedExnVar = nullptr;
833
834 public:
835 void enter(CodeGenFunction &CGF, const Stmt *Finally,
836 llvm::FunctionCallee beginCatchFn,
837 llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
838 void exit(CodeGenFunction &CGF);
839 };
840
841 /// Returns true inside SEH __try blocks.
842 bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
843
844 /// Returns true while emitting a cleanuppad.
845 bool isCleanupPadScope() const {
846 return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
847 }
848
849 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
850 /// current full-expression. Safe against the possibility that
851 /// we're currently inside a conditionally-evaluated expression.
852 template <class T, class... As>
853 void pushFullExprCleanup(CleanupKind kind, As... A) {
854 // If we're not in a conditional branch, or if none of the
855 // arguments requires saving, then use the unconditional cleanup.
857 return EHStack.pushCleanup<T>(kind, A...);
858
859 // Stash values in a tuple so we can guarantee the order of saves.
860 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
861 SavedTuple Saved{saveValueInCond(A)...};
862
863 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
864 EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
866 }
867
868 /// Queue a cleanup to be pushed after finishing the current full-expression,
869 /// potentially with an active flag.
870 template <class T, class... As>
873 return pushCleanupAfterFullExprWithActiveFlag<T>(
874 Kind, RawAddress::invalid(), A...);
875
876 RawAddress ActiveFlag = createCleanupActiveFlag();
877 assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
878 "cleanup active flag should never need saving");
879
880 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
881 SavedTuple Saved{saveValueInCond(A)...};
882
883 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
884 pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag, Saved);
885 }
886
887 template <class T, class... As>
889 RawAddress ActiveFlag, As... A) {
890 LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
891 ActiveFlag.isValid()};
892
895 LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
896 (Header.IsConditional ? sizeof(ActiveFlag) : 0));
897
898 static_assert(sizeof(Header) % alignof(T) == 0,
899 "Cleanup will be allocated on misaligned address");
900 char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
901 new (Buffer) LifetimeExtendedCleanupHeader(Header);
902 new (Buffer + sizeof(Header)) T(A...);
903 if (Header.IsConditional)
904 new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
905 }
906
907 // Push a cleanup onto EHStack and deactivate it later. It is usually
908 // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
909 // full expression).
910 template <class T, class... As>
912 // Placeholder dominating IP for this cleanup.
913 llvm::Instruction *DominatingIP =
914 Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
915 EHStack.pushCleanup<T>(Kind, A...);
917 {EHStack.stable_begin(), DominatingIP});
918 }
919
920 /// Set up the last cleanup that was pushed as a conditional
921 /// full-expression cleanup.
924 }
925
928
929 /// PushDestructorCleanup - Push a cleanup to call the
930 /// complete-object destructor of an object of the given type at the
931 /// given address. Does nothing if T is not a C++ class type with a
932 /// non-trivial destructor.
934
935 /// PushDestructorCleanup - Push a cleanup to call the
936 /// complete-object variant of the given destructor on the object at
937 /// the given address.
939 Address Addr);
940
941 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
942 /// process all branch fixups.
943 void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
944 bool ForDeactivation = false);
945
946 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
947 /// The block cannot be reactivated. Pops it if it's the top of the
948 /// stack.
949 ///
950 /// \param DominatingIP - An instruction which is known to
951 /// dominate the current IP (if set) and which lies along
952 /// all paths of execution between the current IP and the
953 /// the point at which the cleanup comes into scope.
955 llvm::Instruction *DominatingIP);
956
957 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
958 /// Cannot be used to resurrect a deactivated cleanup.
959 ///
960 /// \param DominatingIP - An instruction which is known to
961 /// dominate the current IP (if set) and which lies along
962 /// all paths of execution between the current IP and the
963 /// the point at which the cleanup comes into scope.
965 llvm::Instruction *DominatingIP);
966
967 /// Enters a new scope for capturing cleanups, all of which
968 /// will be executed once the scope is exited.
970 EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
971 size_t LifetimeExtendedCleanupStackSize;
972 CleanupDeactivationScope DeactivateCleanups;
973 bool OldDidCallStackSave;
974 protected:
976 private:
977
978 RunCleanupsScope(const RunCleanupsScope &) = delete;
979 void operator=(const RunCleanupsScope &) = delete;
980
981 protected:
983
984 public:
985 /// Enter a new cleanup scope.
987 : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
988 CleanupStackDepth = CGF.EHStack.stable_begin();
989 LifetimeExtendedCleanupStackSize =
991 OldDidCallStackSave = CGF.DidCallStackSave;
992 CGF.DidCallStackSave = false;
993 OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
994 CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
995 }
996
997 /// Exit this cleanup scope, emitting any accumulated cleanups.
999 if (PerformCleanup)
1000 ForceCleanup();
1001 }
1002
1003 /// Determine whether this scope requires any cleanups.
1004 bool requiresCleanups() const {
1005 return CGF.EHStack.stable_begin() != CleanupStackDepth;
1006 }
1007
1008 /// Force the emission of cleanups now, instead of waiting
1009 /// until this object is destroyed.
1010 /// \param ValuesToReload - A list of values that need to be available at
1011 /// the insertion point after cleanup emission. If cleanup emission created
1012 /// a shared cleanup block, these value pointers will be rewritten.
1013 /// Otherwise, they not will be modified.
1014 void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
1015 assert(PerformCleanup && "Already forced cleanup");
// Restore the stack-save flag captured when the scope was entered.
1016 CGF.DidCallStackSave = OldDidCallStackSave;
// Deactivate the deferred cleanups first so they are not emitted when
// the cleanup blocks below are popped.
1017 DeactivateCleanups.ForceDeactivate();
1018 CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
1019 ValuesToReload);
// Mark the scope as cleaned so the destructor becomes a no-op.
1020 PerformCleanup = false;
1021 CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
1022 }
1023 };
1024
1025 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1028
1030 SourceRange Range;
1032 LexicalScope *ParentScope;
1033
1034 LexicalScope(const LexicalScope &) = delete;
1035 void operator=(const LexicalScope &) = delete;
1036
1037 public:
1038 /// Enter a new cleanup scope.
1040 : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
1041 CGF.CurLexicalScope = this;
1042 if (CGDebugInfo *DI = CGF.getDebugInfo())
1043 DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
1044 }
1045
    /// Record a label declared in this lexical scope so it can be
    /// rescoped into the parent when this scope ends (see rescopeLabels()).
    void addLabel(const LabelDecl *label) {
      assert(PerformCleanup && "adding label to dead scope?");
      Labels.push_back(label);
    }
1050
1051 /// Exit this cleanup scope, emitting any accumulated
1052 /// cleanups.
1054 if (CGDebugInfo *DI = CGF.getDebugInfo())
1055 DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
1056
1057 // If we should perform a cleanup, force them now. Note that
1058 // this ends the cleanup scope before rescoping any labels.
1059 if (PerformCleanup) {
1060 ApplyDebugLocation DL(CGF, Range.getEnd());
1061 ForceCleanup();
1062 }
1063 }
1064
1065 /// Force the emission of cleanups now, instead of waiting
1066 /// until this object is destroyed.
1068 CGF.CurLexicalScope = ParentScope;
1070
1071 if (!Labels.empty())
1072 rescopeLabels();
1073 }
1074
1075 bool hasLabels() const {
1076 return !Labels.empty();
1077 }
1078
1079 void rescopeLabels();
1080 };
1081
1082 typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
1083
1084 /// The class used to assign some variables some temporarily addresses.
1086 DeclMapTy SavedLocals;
1087 DeclMapTy SavedTempAddresses;
1088 OMPMapVars(const OMPMapVars &) = delete;
1089 void operator=(const OMPMapVars &) = delete;
1090
1091 public:
1092 explicit OMPMapVars() = default;
1094 assert(SavedLocals.empty() && "Did not restored original addresses.");
1095 };
1096
1097 /// Sets the address of the variable \p LocalVD to be \p TempAddr in
1098 /// function \p CGF.
1099 /// \return true if at least one variable was set already, false otherwise.
1100 bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
1101 Address TempAddr) {
1102 LocalVD = LocalVD->getCanonicalDecl();
1103 // Only save it once.
1104 if (SavedLocals.count(LocalVD)) return false;
1105
1106 // Copy the existing local entry to SavedLocals.
1107 auto it = CGF.LocalDeclMap.find(LocalVD);
1108 if (it != CGF.LocalDeclMap.end())
1109 SavedLocals.try_emplace(LocalVD, it->second);
1110 else
1111 SavedLocals.try_emplace(LocalVD, Address::invalid());
1112
1113 // Generate the private entry.
1114 QualType VarTy = LocalVD->getType();
1115 if (VarTy->isReferenceType()) {
1116 Address Temp = CGF.CreateMemTemp(VarTy);
1117 CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
1118 TempAddr = Temp;
1119 }
1120 SavedTempAddresses.try_emplace(LocalVD, TempAddr);
1121
1122 return true;
1123 }
1124
1125 /// Applies new addresses to the list of the variables.
1126 /// \return true if at least one variable is using new address, false
1127 /// otherwise.
1129 copyInto(SavedTempAddresses, CGF.LocalDeclMap);
1130 SavedTempAddresses.clear();
1131 return !SavedLocals.empty();
1132 }
1133
1134 /// Restores original addresses of the variables.
1136 if (!SavedLocals.empty()) {
1137 copyInto(SavedLocals, CGF.LocalDeclMap);
1138 SavedLocals.clear();
1139 }
1140 }
1141
1142 private:
1143 /// Copy all the entries in the source map over the corresponding
1144 /// entries in the destination, which must exist.
1145 static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
1146 for (auto &Pair : Src) {
1147 if (!Pair.second.isValid()) {
1148 Dest.erase(Pair.first);
1149 continue;
1150 }
1151
1152 auto I = Dest.find(Pair.first);
1153 if (I != Dest.end())
1154 I->second = Pair.second;
1155 else
1156 Dest.insert(Pair);
1157 }
1158 }
1159 };
1160
1161 /// The scope used to remap some variables as private in the OpenMP loop body
1162 /// (or other captured region emitted without outlining), and to restore old
1163 /// vars back on exit.
1165 OMPMapVars MappedVars;
1166 OMPPrivateScope(const OMPPrivateScope &) = delete;
1167 void operator=(const OMPPrivateScope &) = delete;
1168
1169 public:
1170 /// Enter a new OpenMP private scope.
1172
1173 /// Registers \p LocalVD variable as a private with \p Addr as the address
1174 /// of the corresponding private variable. \p
1175 /// PrivateGen is the address of the generated private variable.
1176 /// \return true if the variable is registered as private, false if it has
1177 /// been privatized already.
    bool addPrivate(const VarDecl *LocalVD, Address Addr) {
      assert(PerformCleanup && "adding private to dead scope");
      // Delegates to OMPMapVars, which returns false if this variable has
      // already been registered in this scope.
      return MappedVars.setVarAddr(CGF, LocalVD, Addr);
    }
1182
1183 /// Privatizes local variables previously registered as private.
1184 /// Registration is separate from the actual privatization to allow
1185 /// initializers use values of the original variables, not the private one.
1186 /// This is important, for example, if the private variable is a class
1187 /// variable initialized by a constructor that references other private
1188 /// variables. But at initialization original variables must be used, not
1189 /// private copies.
1190 /// \return true if at least one variable was privatized, false otherwise.
1191 bool Privatize() { return MappedVars.apply(CGF); }
1192
1195 restoreMap();
1196 }
1197
1198 /// Exit scope - all the mapped variables are restored.
1200 if (PerformCleanup)
1201 ForceCleanup();
1202 }
1203
1204 /// Checks if the global variable is captured in current function.
1205 bool isGlobalVarCaptured(const VarDecl *VD) const {
1206 VD = VD->getCanonicalDecl();
1207 return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
1208 }
1209
1210 /// Restore all mapped variables w/o clean up. This is usefully when we want
1211 /// to reference the original variables but don't want the clean up because
1212 /// that could emit lifetime end too early, causing backend issue #56913.
1213 void restoreMap() { MappedVars.restore(CGF); }
1214 };
1215
1216 /// Save/restore original map of previously emitted local vars in case when we
1217 /// need to duplicate emission of the same code several times in the same
1218 /// function for OpenMP code.
1220 CodeGenFunction &CGF;
1221 DeclMapTy SavedMap;
1222
1223 public:
1225 : CGF(CGF), SavedMap(CGF.LocalDeclMap) {}
1226 ~OMPLocalDeclMapRAII() { SavedMap.swap(CGF.LocalDeclMap); }
1227 };
1228
1229 /// Takes the old cleanup stack size and emits the cleanup blocks
1230 /// that have been added.
1231 void
1233 std::initializer_list<llvm::Value **> ValuesToReload = {});
1234
1235 /// Takes the old cleanup stack size and emits the cleanup blocks
1236 /// that have been added, then adds all lifetime-extended cleanups from
1237 /// the given position to the stack.
1238 void
1240 size_t OldLifetimeExtendedStackSize,
1241 std::initializer_list<llvm::Value **> ValuesToReload = {});
1242
1243 void ResolveBranchFixups(llvm::BasicBlock *Target);
1244
1245 /// The given basic block lies in the current EH scope, but may be a
1246 /// target of a potentially scope-crossing jump; get a stable handle
1247 /// to which we can perform this jump later.
1249 return JumpDest(Target,
1252 }
1253
1254 /// The given basic block lies in the current EH scope, but may be a
1255 /// target of a potentially scope-crossing jump; get a stable handle
1256 /// to which we can perform this jump later.
1257 JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
1259 }
1260
1261 /// EmitBranchThroughCleanup - Emit a branch from the current insert
1262 /// block through the normal cleanup handling code (if any) and then
1263 /// on to \arg Dest.
1265
1266 /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
1267 /// specified destination obviously has no cleanups to run. 'false' is always
1268 /// a conservatively correct answer for this method.
1270
1271 /// popCatchScope - Pops the catch scope at the top of the EHScope
1272 /// stack, emitting any required code (other than the catch handlers
1273 /// themselves).
1275
1276 llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
1278 llvm::BasicBlock *
1280
1281 /// An object to manage conditionally-evaluated expressions.
1283 llvm::BasicBlock *StartBB;
1284
1285 public:
1287 : StartBB(CGF.Builder.GetInsertBlock()) {}
1288
1290 assert(CGF.OutermostConditional != this);
1291 if (!CGF.OutermostConditional)
1292 CGF.OutermostConditional = this;
1293 }
1294
1296 assert(CGF.OutermostConditional != nullptr);
1297 if (CGF.OutermostConditional == this)
1298 CGF.OutermostConditional = nullptr;
1299 }
1300
1301 /// Returns a block which will be executed prior to each
1302 /// evaluation of the conditional code.
1303 llvm::BasicBlock *getStartingBlock() const {
1304 return StartBB;
1305 }
1306 };
1307
1308 /// isInConditionalBranch - Return true if we're currently emitting
1309 /// one branch or the other of a conditional expression.
1310 bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1311
  void setBeforeOutermostConditional(llvm::Value *value, Address addr,
                                     CodeGenFunction &CGF) {
    assert(isInConditionalBranch());
    // Insert the store into the block that starts the outermost conditional,
    // just before that block's last instruction, so the stored value is
    // available regardless of which branch is taken afterwards.
    llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
    auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF),
                                     block->back().getIterator());
    // Propagate the known alignment of the destination address.
    store->setAlignment(addr.getAlignment().getAsAlign());
  }
1320
1321 /// An RAII object to record that we're evaluating a statement
1322 /// expression.
1324 CodeGenFunction &CGF;
1325
1326 /// We have to save the outermost conditional: cleanups in a
1327 /// statement expression aren't conditional just because the
1328 /// StmtExpr is.
1329 ConditionalEvaluation *SavedOutermostConditional;
1330
1331 public:
1333 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1334 CGF.OutermostConditional = nullptr;
1335 }
1336
1338 CGF.OutermostConditional = SavedOutermostConditional;
1339 CGF.EnsureInsertPoint();
1340 }
1341 };
1342
1343 /// An object which temporarily prevents a value from being
1344 /// destroyed by aggressive peephole optimizations that assume that
1345 /// all uses of a value have been realized in the IR.
1347 llvm::Instruction *Inst = nullptr;
1348 friend class CodeGenFunction;
1349
1350 public:
1352 };
1353
1354 /// A non-RAII class containing all the information about a bound
1355 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1356 /// this which makes individual mappings very simple; using this
1357 /// class directly is useful when you have a variable number of
1358 /// opaque values or don't want the RAII functionality for some
1359 /// reason.
1361 const OpaqueValueExpr *OpaqueValue;
1362 bool BoundLValue;
1364
1366 bool boundLValue)
1367 : OpaqueValue(ov), BoundLValue(boundLValue) {}
1368 public:
1369 OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1370
1371 static bool shouldBindAsLValue(const Expr *expr) {
1372 // gl-values should be bound as l-values for obvious reasons.
1373 // Records should be bound as l-values because IR generation
1374 // always keeps them in memory. Expressions of function type
1375 // act exactly like l-values but are formally required to be
1376 // r-values in C.
1377 return expr->isGLValue() ||
1378 expr->getType()->isFunctionType() ||
1379 hasAggregateEvaluationKind(expr->getType());
1380 }
1381
1383 const OpaqueValueExpr *ov,
1384 const Expr *e) {
1385 if (shouldBindAsLValue(ov))
1386 return bind(CGF, ov, CGF.EmitLValue(e));
1387 return bind(CGF, ov, CGF.EmitAnyExpr(e));
1388 }
1389
1391 const OpaqueValueExpr *ov,
1392 const LValue &lv) {
1393 assert(shouldBindAsLValue(ov));
1394 CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1395 return OpaqueValueMappingData(ov, true);
1396 }
1397
1399 const OpaqueValueExpr *ov,
1400 const RValue &rv) {
1401 assert(!shouldBindAsLValue(ov));
1402 CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1403
1404 OpaqueValueMappingData data(ov, false);
1405
1406 // Work around an extremely aggressive peephole optimization in
1407 // EmitScalarConversion which assumes that all other uses of a
1408 // value are extant.
1409 data.Protection = CGF.protectFromPeepholes(rv);
1410
1411 return data;
1412 }
1413
1414 bool isValid() const { return OpaqueValue != nullptr; }
1415 void clear() { OpaqueValue = nullptr; }
1416
1418 assert(OpaqueValue && "no data to unbind!");
1419
1420 if (BoundLValue) {
1421 CGF.OpaqueLValues.erase(OpaqueValue);
1422 } else {
1423 CGF.OpaqueRValues.erase(OpaqueValue);
1424 CGF.unprotectFromPeepholes(Protection);
1425 }
1426 }
1427 };
1428
1429 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1431 CodeGenFunction &CGF;
1433
1434 public:
1435 static bool shouldBindAsLValue(const Expr *expr) {
1437 }
1438
1439 /// Build the opaque value mapping for the given conditional
1440 /// operator if it's the GNU ?: extension. This is a common
1441 /// enough pattern that the convenience operator is really
1442 /// helpful.
1443 ///
1445 const AbstractConditionalOperator *op) : CGF(CGF) {
1446 if (isa<ConditionalOperator>(op))
1447 // Leave Data empty.
1448 return;
1449
1450 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1452 e->getCommon());
1453 }
1454
1455 /// Build the opaque value mapping for an OpaqueValueExpr whose source
1456 /// expression is set to the expression the OVE represents.
1458 : CGF(CGF) {
1459 if (OV) {
1460 assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1461 "for OVE with no source expression");
1463 }
1464 }
1465
1467 const OpaqueValueExpr *opaqueValue,
1468 LValue lvalue)
1469 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1470 }
1471
1473 const OpaqueValueExpr *opaqueValue,
1474 RValue rvalue)
1475 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1476 }
1477
    /// Explicitly end the mapping before this object is destroyed; the
    /// destructor then sees an invalid Data and does nothing.
    void pop() {
      Data.unbind(CGF);
      Data.clear();
    }
1482
1484 if (Data.isValid()) Data.unbind(CGF);
1485 }
1486 };
1487
1488private:
1489 CGDebugInfo *DebugInfo;
1490 /// Used to create unique names for artificial VLA size debug info variables.
1491 unsigned VLAExprCounter = 0;
1492 bool DisableDebugInfo = false;
1493
1494 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1495 /// calling llvm.stacksave for multiple VLAs in the same scope.
1496 bool DidCallStackSave = false;
1497
1498 /// IndirectBranch - The first time an indirect goto is seen we create a block
1499 /// with an indirect branch. Every time we see the address of a label taken,
1500 /// we add the label to the indirect goto. Every subsequent indirect goto is
1501 /// codegen'd as a jump to the IndirectBranch's basic block.
1502 llvm::IndirectBrInst *IndirectBranch = nullptr;
1503
1504 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1505 /// decls.
1506 DeclMapTy LocalDeclMap;
1507
1508 // Keep track of the cleanups for callee-destructed parameters pushed to the
1509 // cleanup stack so that they can be deactivated later.
1510 llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1511 CalleeDestructedParamCleanups;
1512
1513 /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1514 /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1515 /// parameter.
1516 llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1517 SizeArguments;
1518
1519 /// Track escaped local variables with auto storage. Used during SEH
1520 /// outlining to produce a call to llvm.localescape.
1521 llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1522
1523 /// LabelMap - This keeps track of the LLVM basic block for each C label.
1524 llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1525
  // BreakContinueStack - This keeps track of where break and continue
  // statements should jump to.
  /// Pair of jump destinations for the innermost enclosing construct that
  /// 'break' and 'continue' can target.
  struct BreakContinue {
    BreakContinue(JumpDest Break, JumpDest Continue)
        : BreakBlock(Break), ContinueBlock(Continue) {}

    JumpDest BreakBlock;    // Where 'break' branches to.
    JumpDest ContinueBlock; // Where 'continue' branches to.
  };
1535 SmallVector<BreakContinue, 8> BreakContinueStack;
1536
1537 /// Handles cancellation exit points in OpenMP-related constructs.
  /// Handles cancellation exit points in OpenMP-related constructs.
  class OpenMPCancelExitStack {
    /// Tracks cancellation exit point and join point for cancel-related exit
    /// and normal exit.
    struct CancelExit {
      CancelExit() = default;
      CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
                 JumpDest ContBlock)
          : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
      /// Directive this entry was pushed for (OMPD_unknown in the default
      /// sentinel entry).
      OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
      /// true if the exit block has been emitted already by the special
      /// emitExit() call, false if the default codegen is used.
      bool HasBeenEmitted = false;
      JumpDest ExitBlock;
      JumpDest ContBlock;
    };

    SmallVector<CancelExit, 8> Stack;

  public:
    // Start with one default-constructed entry so Stack.back() is always
    // valid; its ExitBlock is invalid, meaning "no cancellation region".
    OpenMPCancelExitStack() : Stack(1) {}
    ~OpenMPCancelExitStack() = default;
    /// Fetches the exit block for the current OpenMP construct.
    JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
    /// Emits exit block with special codegen procedure specific for the related
    /// OpenMP construct + emits code for normal construct cleanup.
    void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
                  const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
      if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
        assert(CGF.getOMPCancelDestination(Kind).isValid());
        assert(CGF.HaveInsertPoint());
        assert(!Stack.back().HasBeenEmitted);
        // Emit the cancellation exit block out of line: fill it with the
        // cleanup code and a branch to the join point, then restore the
        // previous insertion point for the normal path.
        auto IP = CGF.Builder.saveAndClearIP();
        CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
        CodeGen(CGF);
        CGF.EmitBranch(Stack.back().ContBlock.getBlock());
        CGF.Builder.restoreIP(IP);
        Stack.back().HasBeenEmitted = true;
      }
      // The normal (non-cancel) exit runs the same cleanup code inline.
      CodeGen(CGF);
    }
    /// Enter the cancel supporting \a Kind construct.
    /// \param Kind OpenMP directive that supports cancel constructs.
    /// \param HasCancel true, if the construct has inner cancel directive,
    /// false otherwise.
    void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
      // Without an inner cancel directive there is nothing to branch to, so
      // push invalid destinations.
      Stack.push_back({Kind,
                       HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
                                 : JumpDest(),
                       HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
                                 : JumpDest()});
    }
    /// Emits default exit point for the cancel construct (if the special one
    /// has not been used) + join point for cancel/normal exits.
    void exit(CodeGenFunction &CGF) {
      if (getExitBlock().isValid()) {
        assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
        bool HaveIP = CGF.HaveInsertPoint();
        if (!Stack.back().HasBeenEmitted) {
          // Default codegen: branch from the normal path to the join point,
          // and make the (otherwise empty) cancel exit branch there too.
          if (HaveIP)
            CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
          CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
          CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
        }
        // Join point shared by the cancel and the normal exits.
        CGF.EmitBlock(Stack.back().ContBlock.getBlock());
        if (!HaveIP) {
          // No live insertion point on entry: terminate the join block and
          // leave the insertion point cleared, as it was.
          CGF.Builder.CreateUnreachable();
          CGF.Builder.ClearInsertionPoint();
        }
      }
      Stack.pop_back();
    }
  };
1610 OpenMPCancelExitStack OMPCancelStack;
1611
1612 /// Lower the Likelihood knowledge about the \p Cond via llvm.expect intrin.
1613 llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
1614 Stmt::Likelihood LH);
1615
1616 CodeGenPGO PGO;
1617
1618 /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
1619 Address MCDCCondBitmapAddr = Address::invalid();
1620
1621 /// Calculate branch weights appropriate for PGO data
1622 llvm::MDNode *createProfileWeights(uint64_t TrueCount,
1623 uint64_t FalseCount) const;
1624 llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
1625 llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1626 uint64_t LoopCount) const;
1627
1628public:
1629 /// Increment the profiler's counter for the given statement by \p StepV.
1630 /// If \p StepV is null, the default increment is 1.
1631 void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1633 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
1634 !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile)) {
1635 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1636 PGO.emitCounterSetOrIncrement(Builder, S, StepV);
1637 }
1638 PGO.setCurrentStmt(S);
1639 }
1640
1643 CGM.getCodeGenOpts().MCDCCoverage &&
1644 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
1645 }
1646
1647 /// Allocate a temp value on the stack that MCDC can use to track condition
1648 /// results.
1650 if (isMCDCCoverageEnabled()) {
1651 PGO.emitMCDCParameters(Builder);
1652 MCDCCondBitmapAddr =
1653 CreateIRTemp(getContext().UnsignedIntTy, "mcdc.addr");
1654 }
1655 }
1656
1657 bool isBinaryLogicalOp(const Expr *E) const {
1658 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
1659 return (BOp && BOp->isLogicalOp());
1660 }
1661
1662 /// Zero-init the MCDC temp value.
1665 PGO.emitMCDCCondBitmapReset(Builder, E, MCDCCondBitmapAddr);
1666 PGO.setCurrentStmt(E);
1667 }
1668 }
1669
1670 /// Increment the profiler's counter for the given expression by \p StepV.
1671 /// If \p StepV is null, the default increment is 1.
1674 PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr, *this);
1675 PGO.setCurrentStmt(E);
1676 }
1677 }
1678
1679 /// Update the MCDC temp value with the condition's evaluated result.
1680 void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val) {
1681 if (isMCDCCoverageEnabled()) {
1682 PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val, *this);
1683 PGO.setCurrentStmt(E);
1684 }
1685 }
1686
1687 /// Get the profiler's count for the given statement.
1688 uint64_t getProfileCount(const Stmt *S) {
1689 return PGO.getStmtCount(S).value_or(0);
1690 }
1691
1692 /// Set the profiler's current count.
1693 void setCurrentProfileCount(uint64_t Count) {
1694 PGO.setCurrentRegionCount(Count);
1695 }
1696
1697 /// Get the profiler's current count. This is generally the count for the most
1698 /// recently incremented counter.
1700 return PGO.getCurrentRegionCount();
1701 }
1702
1703private:
1704
1705 /// SwitchInsn - This is nearest current switch instruction. It is null if
1706 /// current context is not in a switch.
1707 llvm::SwitchInst *SwitchInsn = nullptr;
1708 /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1709 SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1710
1711 /// The likelihood attributes of the SwitchCase.
1712 SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;
1713
1714 /// CaseRangeBlock - This block holds if condition check for last case
1715 /// statement range in current switch instruction.
1716 llvm::BasicBlock *CaseRangeBlock = nullptr;
1717
1718 /// OpaqueLValues - Keeps track of the current set of opaque value
1719 /// expressions.
1720 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1721 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1722
1723 // VLASizeMap - This keeps track of the associated size for each VLA type.
1724 // We track this by the size expression rather than the type itself because
1725 // in certain situations, like a const qualifier applied to an VLA typedef,
1726 // multiple VLA types can share the same size expression.
1727 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1728 // enter/leave scopes.
1729 llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1730
1731 /// A block containing a single 'unreachable' instruction. Created
1732 /// lazily by getUnreachableBlock().
1733 llvm::BasicBlock *UnreachableBlock = nullptr;
1734
1735 /// Counts of the number return expressions in the function.
1736 unsigned NumReturnExprs = 0;
1737
1738 /// Count the number of simple (constant) return expressions in the function.
1739 unsigned NumSimpleReturnExprs = 0;
1740
1741 /// The last regular (non-return) debug location (breakpoint) in the function.
1742 SourceLocation LastStopPoint;
1743
1744public:
1745 /// Source location information about the default argument or member
1746 /// initializer expression we're evaluating, if any.
1750
1751 /// A scope within which we are constructing the fields of an object which
1752 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1753 /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1755 public:
1757 : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1758 CGF.CXXDefaultInitExprThis = This;
1759 }
1761 CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1762 }
1763
1764 private:
1765 CodeGenFunction &CGF;
1766 Address OldCXXDefaultInitExprThis;
1767 };
1768
1769 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1770 /// is overridden to be the object under construction.
1772 public:
1774 : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1775 OldCXXThisAlignment(CGF.CXXThisAlignment),
1777 CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
1778 CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1779 }
1781 CGF.CXXThisValue = OldCXXThisValue;
1782 CGF.CXXThisAlignment = OldCXXThisAlignment;
1783 }
1784
1785 public:
1787 llvm::Value *OldCXXThisValue;
1790 };
1791
1795 };
1796
1797 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1798 /// current loop index is overridden.
1800 public:
1801 ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1802 : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1803 CGF.ArrayInitIndex = Index;
1804 }
1806 CGF.ArrayInitIndex = OldArrayInitIndex;
1807 }
1808
1809 private:
1810 CodeGenFunction &CGF;
1811 llvm::Value *OldArrayInitIndex;
1812 };
1813
1815 public:
1817 : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1818 OldCurCodeDecl(CGF.CurCodeDecl),
1819 OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1820 OldCXXABIThisValue(CGF.CXXABIThisValue),
1821 OldCXXThisValue(CGF.CXXThisValue),
1822 OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1823 OldCXXThisAlignment(CGF.CXXThisAlignment),
1824 OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1825 OldCXXInheritedCtorInitExprArgs(
1826 std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1827 CGF.CurGD = GD;
1828 CGF.CurFuncDecl = CGF.CurCodeDecl =
1829 cast<CXXConstructorDecl>(GD.getDecl());
1830 CGF.CXXABIThisDecl = nullptr;
1831 CGF.CXXABIThisValue = nullptr;
1832 CGF.CXXThisValue = nullptr;
1833 CGF.CXXABIThisAlignment = CharUnits();
1834 CGF.CXXThisAlignment = CharUnits();
1836 CGF.FnRetTy = QualType();
1837 CGF.CXXInheritedCtorInitExprArgs.clear();
1838 }
1840 CGF.CurGD = OldCurGD;
1841 CGF.CurFuncDecl = OldCurFuncDecl;
1842 CGF.CurCodeDecl = OldCurCodeDecl;
1843 CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1844 CGF.CXXABIThisValue = OldCXXABIThisValue;
1845 CGF.CXXThisValue = OldCXXThisValue;
1846 CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1847 CGF.CXXThisAlignment = OldCXXThisAlignment;
1848 CGF.ReturnValue = OldReturnValue;
1849 CGF.FnRetTy = OldFnRetTy;
1850 CGF.CXXInheritedCtorInitExprArgs =
1851 std::move(OldCXXInheritedCtorInitExprArgs);
1852 }
1853
1854 private:
1855 CodeGenFunction &CGF;
1856 GlobalDecl OldCurGD;
1857 const Decl *OldCurFuncDecl;
1858 const Decl *OldCurCodeDecl;
1859 ImplicitParamDecl *OldCXXABIThisDecl;
1860 llvm::Value *OldCXXABIThisValue;
1861 llvm::Value *OldCXXThisValue;
1862 CharUnits OldCXXABIThisAlignment;
1863 CharUnits OldCXXThisAlignment;
1864 Address OldReturnValue;
1865 QualType OldFnRetTy;
1866 CallArgList OldCXXInheritedCtorInitExprArgs;
1867 };
1868
1869 // Helper class for the OpenMP IR Builder. Allows reusability of code used for
1870 // region body, and finalization codegen callbacks. This will class will also
1871 // contain privatization functions used by the privatization call backs
1872 //
1873 // TODO: this is temporary class for things that are being moved out of
1874 // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
1875 // utility function for use with the OMPBuilder. Once that move to use the
1876 // OMPBuilder is done, everything here will either become part of CodeGenFunc.
1877 // directly, or a new helper class that will contain functions used by both
1878 // this and the OMPBuilder
1879
1881
1885
1886 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1887
1888 /// Cleanup action for allocate support.
1890
1891 private:
1892 llvm::CallInst *RTLFnCI;
1893
1894 public:
    OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
      // Detach the runtime call now; Emit() re-inserts it at the cleanup
      // point when the cleanup fires.
      RLFnCI->removeFromParent();
    }
1898
1899 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
1900 if (!CGF.HaveInsertPoint())
1901 return;
1902 CGF.Builder.Insert(RTLFnCI);
1903 }
1904 };
1905
1906 /// Returns address of the threadprivate variable for the current
1907 /// thread. This Also create any necessary OMP runtime calls.
1908 ///
1909 /// \param VD VarDecl for Threadprivate variable.
1910 /// \param VDAddr Address of the Vardecl
1911 /// \param Loc The location where the barrier directive was encountered
1913 const VarDecl *VD, Address VDAddr,
1915
1916 /// Gets the OpenMP-specific address of the local variable /p VD.
1918 const VarDecl *VD);
1919 /// Get the platform-specific name separator.
1920 /// \param Parts different parts of the final name that needs separation
1921 /// \param FirstSeparator First separator used between the initial two
1922 /// parts of the name.
1923 /// \param Separator separator used between all of the rest consecutinve
1924 /// parts of the name
1925 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
1926 StringRef FirstSeparator = ".",
1927 StringRef Separator = ".");
1928 /// Emit the Finalization for an OMP region
1929 /// \param CGF The Codegen function this belongs to
1930 /// \param IP Insertion point for generating the finalization code.
1932 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1933 assert(IP.getBlock()->end() != IP.getPoint() &&
1934 "OpenMP IR Builder should cause terminated block!");
1935
1936 llvm::BasicBlock *IPBB = IP.getBlock();
1937 llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
1938 assert(DestBB && "Finalization block should have one successor!");
1939
1940 // erase and replace with cleanup branch.
1941 IPBB->getTerminator()->eraseFromParent();
1942 CGF.Builder.SetInsertPoint(IPBB);
1944 CGF.EmitBranchThroughCleanup(Dest);
1945 }
1946
1947 /// Emit the body of an OMP region
1948 /// \param CGF The Codegen function this belongs to
1949 /// \param RegionBodyStmt The body statement for the OpenMP region being
1950 /// generated
1951 /// \param AllocaIP Where to insert alloca instructions
1952 /// \param CodeGenIP Where to insert the region code
1953 /// \param RegionName Name to be used for new blocks
1955 const Stmt *RegionBodyStmt,
1956 InsertPointTy AllocaIP,
1957 InsertPointTy CodeGenIP,
1958 Twine RegionName);
1959
1960 static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
1961 llvm::BasicBlock &FiniBB, llvm::Function *Fn,
1963 llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
1964 if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
1965 CodeGenIPBBTI->eraseFromParent();
1966
1967 CGF.Builder.SetInsertPoint(CodeGenIPBB);
1968
1969 if (Fn->doesNotThrow())
1970 CGF.EmitNounwindRuntimeCall(Fn, Args);
1971 else
1972 CGF.EmitRuntimeCall(Fn, Args);
1973
1974 if (CGF.Builder.saveIP().isSet())
1975 CGF.Builder.CreateBr(&FiniBB);
1976 }
1977
1978 /// Emit the body of an OMP region that will be outlined in
1979 /// OpenMPIRBuilder::finalize().
1980 /// \param CGF The Codegen function this belongs to
1981 /// \param RegionBodyStmt The body statement for the OpenMP region being
1982 /// generated
1983 /// \param AllocaIP Where to insert alloca instructions
1984 /// \param CodeGenIP Where to insert the region code
1985 /// \param RegionName Name to be used for new blocks
1987 const Stmt *RegionBodyStmt,
1988 InsertPointTy AllocaIP,
1989 InsertPointTy CodeGenIP,
1990 Twine RegionName);
1991
1992 /// RAII for preserving necessary info during Outlined region body codegen.
1994
1995 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
1996 CodeGenFunction::JumpDest OldReturnBlock;
1997 CodeGenFunction &CGF;
1998
1999 public:
2001 llvm::BasicBlock &RetBB)
2002 : CGF(cgf) {
2003 assert(AllocaIP.isSet() &&
2004 "Must specify Insertion point for allocas of outlined function");
2005 OldAllocaIP = CGF.AllocaInsertPt;
2006 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2007
2008 OldReturnBlock = CGF.ReturnBlock;
2009 CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
2010 }
2011
2013 CGF.AllocaInsertPt = OldAllocaIP;
2014 CGF.ReturnBlock = OldReturnBlock;
2015 }
2016 };
2017
2018 /// RAII for preserving necessary info during inlined region body codegen.
2020
2021 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2022 CodeGenFunction &CGF;
2023
2024 public:
2026 llvm::BasicBlock &FiniBB)
2027 : CGF(cgf) {
2028 // Alloca insertion block should be in the entry block of the containing
2029 // function so it expects an empty AllocaIP in which case will reuse the
2030 // old alloca insertion point, or a new AllocaIP in the same block as
2031 // the old one
2032 assert((!AllocaIP.isSet() ||
2033 CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
2034 "Insertion point should be in the entry block of containing "
2035 "function!");
2036 OldAllocaIP = CGF.AllocaInsertPt;
2037 if (AllocaIP.isSet())
2038 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2039
2040 // TODO: Remove the call, after making sure the counter is not used by
2041 // the EHStack.
2042 // Since this is an inlined region, it should not modify the
2043 // ReturnBlock, and should reuse the one for the enclosing outlined
2044 // region. So, the JumpDest being return by the function is discarded
2045 (void)CGF.getJumpDestInCurrentScope(&FiniBB);
2046 }
2047
2049 };
2050 };
2051
2052private:
2053 /// CXXThisDecl - When generating code for a C++ member function,
2054 /// this will hold the implicit 'this' declaration.
2055 ImplicitParamDecl *CXXABIThisDecl = nullptr;
2056 llvm::Value *CXXABIThisValue = nullptr;
2057 llvm::Value *CXXThisValue = nullptr;
2058 CharUnits CXXABIThisAlignment;
2059 CharUnits CXXThisAlignment;
2060
2061 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
2062 /// this expression.
2063 Address CXXDefaultInitExprThis = Address::invalid();
2064
2065 /// The current array initialization index when evaluating an
2066 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
2067 llvm::Value *ArrayInitIndex = nullptr;
2068
2069 /// The values of function arguments to use when evaluating
2070 /// CXXInheritedCtorInitExprs within this context.
2071 CallArgList CXXInheritedCtorInitExprArgs;
2072
2073 /// CXXStructorImplicitParamDecl - When generating code for a constructor or
2074 /// destructor, this will hold the implicit argument (e.g. VTT).
2075 ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
2076 llvm::Value *CXXStructorImplicitParamValue = nullptr;
2077
2078 /// OutermostConditional - Points to the outermost active
2079 /// conditional control. This is used so that we know if a
2080 /// temporary should be destroyed conditionally.
2081 ConditionalEvaluation *OutermostConditional = nullptr;
2082
2083 /// The current lexical scope.
2084 LexicalScope *CurLexicalScope = nullptr;
2085
2086 /// The current source location that should be used for exception
2087 /// handling code.
2088 SourceLocation CurEHLocation;
2089
2090 /// BlockByrefInfos - For each __block variable, contains
2091 /// information about the layout of the variable.
2092 llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
2093
2094 /// Used by -fsanitize=nullability-return to determine whether the return
2095 /// value can be checked.
2096 llvm::Value *RetValNullabilityPrecondition = nullptr;
2097
2098 /// Check if -fsanitize=nullability-return instrumentation is required for
2099 /// this function.
2100 bool requiresReturnValueNullabilityCheck() const {
2101 return RetValNullabilityPrecondition;
2102 }
2103
2104 /// Used to store precise source locations for return statements by the
2105 /// runtime return value checks.
2106 Address ReturnLocation = Address::invalid();
2107
2108 /// Check if the return value of this function requires sanitization.
2109 bool requiresReturnValueCheck() const;
2110
2111 bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
2112 bool hasInAllocaArg(const CXXMethodDecl *MD);
2113
2114 llvm::BasicBlock *TerminateLandingPad = nullptr;
2115 llvm::BasicBlock *TerminateHandler = nullptr;
2117
2118 /// Terminate funclets keyed by parent funclet pad.
2119 llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
2120
2121 /// Largest vector width used in this function. Will be used to create a
2122 /// function attribute.
2123 unsigned LargestVectorWidth = 0;
2124
2125 /// True if we need emit the life-time markers. This is initially set in
2126 /// the constructor, but could be overwritten to true if this is a coroutine.
2127 bool ShouldEmitLifetimeMarkers;
2128
2129 /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
2130 /// the function metadata.
2131 void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
2132
2133public:
2134 CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
2136
2137 CodeGenTypes &getTypes() const { return CGM.getTypes(); }
2138 ASTContext &getContext() const { return CGM.getContext(); }
2140 if (DisableDebugInfo)
2141 return nullptr;
2142 return DebugInfo;
2143 }
2144 void disableDebugInfo() { DisableDebugInfo = true; }
2145 void enableDebugInfo() { DisableDebugInfo = false; }
2146
2148 return CGM.getCodeGenOpts().OptimizationLevel == 0;
2149 }
2150
2151 const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
2152
2153 /// Returns a pointer to the function's exception object and selector slot,
2154 /// which is assigned in every landing pad.
2157
2158 /// Returns the contents of the function's exception object and selector
2159 /// slots.
2160 llvm::Value *getExceptionFromSlot();
2161 llvm::Value *getSelectorFromSlot();
2162
2164
2165 llvm::BasicBlock *getUnreachableBlock() {
2166 if (!UnreachableBlock) {
2167 UnreachableBlock = createBasicBlock("unreachable");
2168 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
2169 }
2170 return UnreachableBlock;
2171 }
2172
2173 llvm::BasicBlock *getInvokeDest() {
2174 if (!EHStack.requiresLandingPad()) return nullptr;
2175 return getInvokeDestImpl();
2176 }
2177
2178 bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
2179
2180 const TargetInfo &getTarget() const { return Target; }
2181 llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
2183 return CGM.getTargetCodeGenInfo();
2184 }
2185
2186 //===--------------------------------------------------------------------===//
2187 // Cleanups
2188 //===--------------------------------------------------------------------===//
2189
2190 typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
2191
2192 void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2193 Address arrayEndPointer,
2194 QualType elementType,
2195 CharUnits elementAlignment,
2196 Destroyer *destroyer);
2197 void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2198 llvm::Value *arrayEnd,
2199 QualType elementType,
2200 CharUnits elementAlignment,
2201 Destroyer *destroyer);
2202
2204 Address addr, QualType type);
2206 Address addr, QualType type);
2208 Destroyer *destroyer, bool useEHCleanupForArray);
2210 Address addr, QualType type);
2212 QualType type, Destroyer *destroyer,
2213 bool useEHCleanupForArray);
2215 QualType type, Destroyer *destroyer,
2216 bool useEHCleanupForArray);
2217 void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
2218 llvm::Value *CompletePtr,
2219 QualType ElementType);
2222 std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
2224 bool useEHCleanupForArray);
2226 Destroyer *destroyer,
2227 bool useEHCleanupForArray,
2228 const VarDecl *VD);
2229 void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
2230 QualType elementType, CharUnits elementAlign,
2231 Destroyer *destroyer,
2232 bool checkZeroLength, bool useEHCleanup);
2233
2235
2236 /// Determines whether an EH cleanup is required to destroy a type
2237 /// with the given destruction kind.
2239 switch (kind) {
2240 case QualType::DK_none:
2241 return false;
2245 return getLangOpts().Exceptions;
2247 return getLangOpts().Exceptions &&
2248 CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
2249 }
2250 llvm_unreachable("bad destruction kind");
2251 }
2252
2255 }
2256
2257 //===--------------------------------------------------------------------===//
2258 // Objective-C
2259 //===--------------------------------------------------------------------===//
2260
2262
2264
2265 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
2267 const ObjCPropertyImplDecl *PID);
2269 const ObjCPropertyImplDecl *propImpl,
2270 const ObjCMethodDecl *GetterMothodDecl,
2271 llvm::Constant *AtomicHelperFn);
2272
2274 ObjCMethodDecl *MD, bool ctor);
2275
2276 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
2277 /// for the given property.
2279 const ObjCPropertyImplDecl *PID);
2281 const ObjCPropertyImplDecl *propImpl,
2282 llvm::Constant *AtomicHelperFn);
2283
2284 //===--------------------------------------------------------------------===//
2285 // Block Bits
2286 //===--------------------------------------------------------------------===//
2287
2288 /// Emit block literal.
2289 /// \return an LLVM value which is a pointer to a struct which contains
2290 /// information about the block, including the block invoke function, the
2291 /// captured variables, etc.
2292 llvm::Value *EmitBlockLiteral(const BlockExpr *);
2293
2295 const CGBlockInfo &Info,
2296 const DeclMapTy &ldm,
2297 bool IsLambdaConversionToBlock,
2298 bool BuildGlobalBlock);
2299
2300 /// Check if \p T is a C++ class that has a destructor that can throw.
2302
2303 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
2304 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
2306 const ObjCPropertyImplDecl *PID);
2308 const ObjCPropertyImplDecl *PID);
2309 llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
2310
2311 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
2312 bool CanThrow);
2313
2314 class AutoVarEmission;
2315
2317
2318 /// Enter a cleanup to destroy a __block variable. Note that this
2319 /// cleanup should be a no-op if the variable hasn't left the stack
2320 /// yet; if a cleanup is required for the variable itself, that needs
2321 /// to be done externally.
2322 ///
2323 /// \param Kind Cleanup kind.
2324 ///
2325 /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
2326 /// structure that will be passed to _Block_object_dispose. When
2327 /// \p LoadBlockVarAddr is true, the address of the field of the block
2328 /// structure that holds the address of the __block structure.
2329 ///
2330 /// \param Flags The flag that will be passed to _Block_object_dispose.
2331 ///
2332 /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
2333 /// \p Addr to get the address of the __block structure.
2335 bool LoadBlockVarAddr, bool CanThrow);
2336
2337 void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
2338 llvm::Value *ptr);
2339
2342
2343 /// BuildBlockByrefAddress - Computes the location of the
2344 /// data in a variable which is declared as __block.
2346 bool followForward = true);
2348 const BlockByrefInfo &info,
2349 bool followForward,
2350 const llvm::Twine &name);
2351
2353
2355
2356 void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
2357 const CGFunctionInfo &FnInfo);
2358
2359 /// Annotate the function with an attribute that disables TSan checking at
2360 /// runtime.
2361 void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
2362
2363 /// Emit code for the start of a function.
2364 /// \param Loc The location to be associated with the function.
2365 /// \param StartLoc The location of the function body.
2367 QualType RetTy,
2368 llvm::Function *Fn,
2369 const CGFunctionInfo &FnInfo,
2370 const FunctionArgList &Args,
2372 SourceLocation StartLoc = SourceLocation());
2373
2375
2379 void EmitFunctionBody(const Stmt *Body);
2380 void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
2381
2382 void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
2383 CallArgList &CallArgs,
2384 const CGFunctionInfo *CallOpFnInfo = nullptr,
2385 llvm::Constant *CallOpFn = nullptr);
2389 CallArgList &CallArgs);
2391 const CGFunctionInfo **ImplFnInfo,
2392 llvm::Function **ImplFn);
2395 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2396 }
2397 void EmitAsanPrologueOrEpilogue(bool Prologue);
2398
2399 /// Emit the unified return block, trying to avoid its emission when
2400 /// possible.
2401 /// \return The debug location of the user written return statement if the
2402 /// return block is avoided.
2403 llvm::DebugLoc EmitReturnBlock();
2404
2405 /// FinishFunction - Complete IR generation of the current function. It is
2406 /// legal to call this function even if there is no current insertion point.
2408
2409 void StartThunk(llvm::Function *Fn, GlobalDecl GD,
2410 const CGFunctionInfo &FnInfo, bool IsUnprototyped);
2411
2412 void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
2413 const ThunkInfo *Thunk, bool IsUnprototyped);
2414
2416
2417 /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
2418 void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
2419 llvm::FunctionCallee Callee);
2420
2421 /// Generate a thunk for the given method.
2422 void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
2423 GlobalDecl GD, const ThunkInfo &Thunk,
2424 bool IsUnprototyped);
2425
2426 llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
2427 const CGFunctionInfo &FnInfo,
2428 GlobalDecl GD, const ThunkInfo &Thunk);
2429
2431 FunctionArgList &Args);
2432
2434
2435 /// Struct with all information about dynamic [sub]class needed to set vptr.
2436 struct VPtr {
2441 };
2442
2443 /// Initialize the vtable pointer of the given subobject.
2445
2447
2450
2452 CharUnits OffsetFromNearestVBase,
2453 bool BaseIsNonVirtualPrimaryBase,
2454 const CXXRecordDecl *VTableClass,
2455 VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
2456
2458
2459 // VTableTrapMode - whether we guarantee that loading the
2460 // vtable is guaranteed to trap on authentication failure,
2461 // even if the resulting vtable pointer is unused.
2462 enum class VTableAuthMode {
2464 MustTrap,
2465 UnsafeUbsanStrip // Should only be used for Vptr UBSan check
2466 };
2467 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
2468 /// to by This.
2469 llvm::Value *
2470 GetVTablePtr(Address This, llvm::Type *VTableTy,
2471 const CXXRecordDecl *VTableClass,
2473
2482 };
2483
2484 /// Derived is the presumed address of an object of type T after a
2485 /// cast. If T is a polymorphic class type, emit a check that the virtual
2486 /// table for Derived belongs to a class derived from T.
2487 void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
2489
2490 /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
2491 /// If vptr CFI is enabled, emit a check that VTable is valid.
2492 void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
2494
2495 /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
2496 /// RD using llvm.type.test.
2497 void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
2499
2500 /// If whole-program virtual table optimization is enabled, emit an assumption
2501 /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
2502 /// enabled, emit a check that VTable is a member of RD's type identifier.
2504 llvm::Value *VTable, SourceLocation Loc);
2505
2506 /// Returns whether we should perform a type checked load when loading a
2507 /// virtual function for virtual calls to members of RD. This is generally
2508 /// true when both vcall CFI and whole-program-vtables are enabled.
2510
2511 /// Emit a type checked load from the given vtable.
2513 llvm::Value *VTable,
2514 llvm::Type *VTableTy,
2515 uint64_t VTableByteOffset);
2516
2517 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
2518 /// given phase of destruction for a destructor. The end result
2519 /// should call destructors on members and base classes in reverse
2520 /// order of their construction.
2522
2523 /// ShouldInstrumentFunction - Return true if the current function should be
2524 /// instrumented with __cyg_profile_func_* calls
2526
2527 /// ShouldSkipSanitizerInstrumentation - Return true if the current function
2528 /// should not be instrumented with sanitizers.
2530
2531 /// ShouldXRayInstrument - Return true if the current function should be
2532 /// instrumented with XRay nop sleds.
2534
2535 /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
2536 /// XRay custom event handling calls.
2538
2539 /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
2540 /// XRay typed event handling calls.
2542
2543 /// Return a type hash constant for a function instrumented by
2544 /// -fsanitize=function.
2545 llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;
2546
2547 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
2548 /// arguments for the given function. This is also responsible for naming the
2549 /// LLVM function arguments.
2551 llvm::Function *Fn,
2552 const FunctionArgList &Args);
2553
2554 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
2555 /// given temporary.
2556 void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
2557 SourceLocation EndLoc);
2558
2559 /// Emit a test that checks if the return value \p RV is nonnull.
2560 void EmitReturnValueCheck(llvm::Value *RV);
2561
2562 /// EmitStartEHSpec - Emit the start of the exception spec.
2563 void EmitStartEHSpec(const Decl *D);
2564
2565 /// EmitEndEHSpec - Emit the end of the exception spec.
2566 void EmitEndEHSpec(const Decl *D);
2567
2568 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2569 llvm::BasicBlock *getTerminateLandingPad();
2570
2571 /// getTerminateLandingPad - Return a cleanup funclet that just calls
2572 /// terminate.
2573 llvm::BasicBlock *getTerminateFunclet();
2574
2575 /// getTerminateHandler - Return a handler (not a landing pad, just
2576 /// a catch handler) that just calls terminate. This is used when
2577 /// a terminate scope encloses a try.
2578 llvm::BasicBlock *getTerminateHandler();
2579
2581 llvm::Type *ConvertType(QualType T);
2583 llvm::Type *LLVMTy = nullptr);
2584 llvm::Type *ConvertType(const TypeDecl *T) {
2585 return ConvertType(getContext().getTypeDeclType(T));
2586 }
2587
2588 /// LoadObjCSelf - Load the value of self. This function is only valid while
2589 /// generating code for an Objective-C method.
2590 llvm::Value *LoadObjCSelf();
2591
2592 /// TypeOfSelfObject - Return type of object that this self represents.
2594
2595 /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2597
2599 return getEvaluationKind(T) == TEK_Scalar;
2600 }
2601
2604 }
2605
2606 /// createBasicBlock - Create an LLVM basic block.
2607 llvm::BasicBlock *createBasicBlock(const Twine &name = "",
2608 llvm::Function *parent = nullptr,
2609 llvm::BasicBlock *before = nullptr) {
2610 return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
2611 }
2612
2613 /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
2614 /// label maps to.
2616
2617 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2618 /// another basic block, simplify it. This assumes that no other code could
2619 /// potentially reference the basic block.
2620 void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2621
2622 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2623 /// adding a fall-through branch from the current insert block if
2624 /// necessary. It is legal to call this function even if there is no current
2625 /// insertion point.
2626 ///
2627 /// IsFinished - If true, indicates that the caller has finished emitting
2628 /// branches to the given block and does not expect to emit code into it. This
2629 /// means the block can be ignored if it is unreachable.
2630 void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
2631
2632 /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2633 /// near its uses, and leave the insertion point in it.
2634 void EmitBlockAfterUses(llvm::BasicBlock *BB);
2635
2636 /// EmitBranch - Emit a branch to the specified basic block from the current
2637 /// insert block, taking care to avoid creation of branches from dummy
2638 /// blocks. It is legal to call this function even if there is no current
2639 /// insertion point.
2640 ///
2641 /// This function clears the current insertion point. The caller should follow
2642 /// calls to this function with calls to Emit*Block prior to generation new
2643 /// code.
2644 void EmitBranch(llvm::BasicBlock *Block);
2645
2646 /// HaveInsertPoint - True if an insertion point is defined. If not, this
2647 /// indicates that the current code being emitted is unreachable.
2648 bool HaveInsertPoint() const {
2649 return Builder.GetInsertBlock() != nullptr;
2650 }
2651
2652 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2653 /// emitted IR has a place to go. Note that by definition, if this function
2654 /// creates a block then that block is unreachable; callers may do better to
2655 /// detect when no insertion point is defined and simply skip IR generation.
2657 if (!HaveInsertPoint())
2659 }
2660
2661 /// ErrorUnsupported - Print out an error that codegen doesn't support the
2662 /// specified stmt yet.
2663 void ErrorUnsupported(const Stmt *S, const char *Type);
2664
2665 //===--------------------------------------------------------------------===//
2666 // Helpers
2667 //===--------------------------------------------------------------------===//
2668
2670 llvm::BasicBlock *LHSBlock,
2671 llvm::BasicBlock *RHSBlock,
2672 llvm::BasicBlock *MergeBlock,
2673 QualType MergedType) {
2674 Builder.SetInsertPoint(MergeBlock);
2675 llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
2676 PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
2677 PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
2678 LHS.replaceBasePointer(PtrPhi);
2679 LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
2680 return LHS;
2681 }
2682
2683 /// Construct an address with the natural alignment of T. If a pointer to T
2684 /// is expected to be signed, the pointer passed to this function must have
2685 /// been signed, and the returned Address will have the pointer authentication
2686 /// information needed to authenticate the signed pointer.
2688 llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
2689 bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
2690 TBAAAccessInfo *TBAAInfo = nullptr,
2691 KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
2692 if (Alignment.isZero())
2693 Alignment =
2694 CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
2695 return Address(Ptr, ConvertTypeForMem(T), Alignment,
2696 CGM.getPointerAuthInfoForPointeeType(T), /*Offset=*/nullptr,
2697 IsKnownNonNull);
2698 }
2699
2702 return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
2704 }
2705
2707 TBAAAccessInfo TBAAInfo) {
2708 return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2709 }
2710
2711 LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2713 return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
2715 }
2716
2717 /// Same as MakeAddrLValue above except that the pointer is known to be
2718 /// unsigned.
2719 LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2721 Address Addr(V, ConvertTypeForMem(T), Alignment);
2722 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2724 }
2725
2726 LValue
2729 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2730 TBAAAccessInfo());
2731 }
2732
2733 /// Given a value of type T* that may not be to a complete object, construct
2734 /// an l-value with the natural pointee alignment of T.
2736
2737 LValue
2739 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
2740
2741 /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
2742 /// to be unsigned.
2744
2746
2748 LValueBaseInfo *PointeeBaseInfo = nullptr,
2749 TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2752 AlignmentSource Source =
2754 LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2755 CGM.getTBAAAccessInfo(RefTy));
2756 return EmitLoadOfReferenceLValue(RefLVal);
2757 }
2758
2759 /// Load a pointer with type \p PtrTy stored at address \p Ptr.
2760 /// Note that \p PtrTy is the type of the loaded pointer, not the addresses
2761 /// it is loaded from.
2763 LValueBaseInfo *BaseInfo = nullptr,
2764 TBAAAccessInfo *TBAAInfo = nullptr);
2766
2767private:
2768 struct AllocaTracker {
2769 void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
2770 llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }
2771
2772 private:
2774 };
2775 AllocaTracker *Allocas = nullptr;
2776
2777public:
2778 // Captures all the allocas created during the scope of its RAII object.
2781 : CGF(CGF), OldTracker(CGF.Allocas) {
2782 CGF.Allocas = &Tracker;
2783 }
2784 ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }
2785
2786 llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }
2787
2788 private:
2789 CodeGenFunction &CGF;
2790 AllocaTracker *OldTracker;
2791 AllocaTracker Tracker;
2792 };
2793
2794 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2795 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2796 /// insertion point of the builder. The caller is responsible for setting an
2797 /// appropriate alignment on
2798 /// the alloca.
2799 ///
2800 /// \p ArraySize is the number of array elements to be allocated if it
2801 /// is not nullptr.
2802 ///
2803 /// LangAS::Default is the address space of pointers to local variables and
2804 /// temporaries, as exposed in the source language. In certain
2805 /// configurations, this is not the same as the alloca address space, and a
2806 /// cast is needed to lift the pointer from the alloca AS into
2807 /// LangAS::Default. This can happen when the target uses a restricted
2808 /// address space for the stack but the source language requires
2809 /// LangAS::Default to be a generic address space. The latter condition is
2810 /// common for most programming languages; OpenCL is an exception in that
2811 /// LangAS::Default is the private address space, which naturally maps
2812 /// to the stack.
2813 ///
2814 /// Because the address of a temporary is often exposed to the program in
2815 /// various ways, this function will perform the cast. The original alloca
2816 /// instruction is returned through \p Alloca if it is not nullptr.
2817 ///
2818 /// The cast is not performed in CreateTempAllocaWithoutCast. This is
2819 /// more efficient if the caller knows that the address will not be exposed.
2820 llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2821 llvm::Value *ArraySize = nullptr);
2823 const Twine &Name = "tmp",
2824 llvm::Value *ArraySize = nullptr,
2825 RawAddress *Alloca = nullptr);
2827 const Twine &Name = "tmp",
2828 llvm::Value *ArraySize = nullptr);
2829
2830 /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2831 /// default ABI alignment of the given LLVM type.
2832 ///
2833 /// IMPORTANT NOTE: This is *not* generally the right alignment for
2834 /// any given AST type that happens to have been lowered to the
2835 /// given IR type. This should only ever be used for function-local,
2836 /// IR-driven manipulations like saving and restoring a value. Do
2837 /// not hand this address off to arbitrary IRGen routines, and especially
2838 /// do not pass it as an argument to a function that might expect a
2839 /// properly ABI-aligned value.
2841 const Twine &Name = "tmp");
2842
2843 /// CreateIRTemp - Create a temporary IR object of the given type, with
2844 /// appropriate alignment. This routine should only be used when a temporary
2845 /// value needs to be stored into an alloca (for example, to avoid explicit
2846 /// PHI construction), but the type is the IR type, not the type appropriate
2847 /// for storing in memory.
2848 ///
2849 /// That is, this is exactly equivalent to CreateMemTemp, but calling
2850 /// ConvertType instead of ConvertTypeForMem.
2851 RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
2852
2853 /// CreateMemTemp - Create a temporary memory object of the given type, with
2854 /// appropriate alignment and cast it to the default address space. Returns
2855 /// the original alloca instruction by \p Alloca if it is not nullptr.
2856 RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
2857 RawAddress *Alloca = nullptr);
2859 const Twine &Name = "tmp",
2860 RawAddress *Alloca = nullptr);
2861
2862 /// CreateMemTemp - Create a temporary memory object of the given type, with
2863 /// appropriate alignment without casting it to the default address space.
2864 RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2866 const Twine &Name = "tmp");
2867
2868 /// CreateAggTemp - Create a temporary memory object for the given
2869 /// aggregate type.
2870 AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
2871 RawAddress *Alloca = nullptr) {
2872 return AggValueSlot::forAddr(
2873 CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
2876 }
2877
2878 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2879 /// expression and compare the result against zero, returning an Int1Ty value.
2880 llvm::Value *EvaluateExprAsBool(const Expr *E);
2881
2882 /// Retrieve the implicit cast expression of the rhs in a binary operator
2883 /// expression by passing pointers to Value and QualType
2884 /// This is used for implicit bitfield conversion checks, which
2885 /// must compare with the value before potential truncation.
2887 llvm::Value **Previous,
2888 QualType *SrcType);
2889
2890 /// Emit a check that an [implicit] conversion of a bitfield. It is not UB,
2891 /// so we use the value after conversion.
2892 void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
2893 llvm::Value *Dst, QualType DstType,
2894 const CGBitFieldInfo &Info,
2896
2897 /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2898 void EmitIgnoredExpr(const Expr *E);
2899
2900 /// EmitAnyExpr - Emit code to compute the specified expression which can have
2901 /// any type. The result is returned as an RValue struct. If this is an
2902 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2903 /// the result should be returned.
2904 ///
2905 /// \param ignoreResult True if the resulting value isn't used.
2908 bool ignoreResult = false);
2909
2910 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2911 // or the value of the expression, depending on how va_list is defined.
2913
2914 /// Emit a "reference" to a __builtin_ms_va_list; this is
2915 /// always the value of the expression, because a __builtin_ms_va_list is a
2916 /// pointer to a char.
2918
2919 /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
2920 /// always be accessible even if no aggregate location is provided.
2922
2923 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2924 /// arbitrary expression into the given memory location.
2925 void EmitAnyExprToMem(const Expr *E, Address Location,
2926 Qualifiers Quals, bool IsInitializer);
2927
2928 void EmitAnyExprToExn(const Expr *E, Address Addr);
2929
2930 /// EmitExprAsInit - Emits the code necessary to initialize a
2931 /// location in memory with the given initializer.
2932 void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2933 bool capturedByInit);
2934
2935 /// hasVolatileMember - returns true if aggregate type has a volatile
2936 /// member.
2938 if (const RecordType *RT = T->getAs<RecordType>()) {
2939 const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
2940 return RD->hasVolatileMember();
2941 }
2942 return false;
2943 }
2944
2945 /// Determine whether a return value slot may overlap some other object.
2947 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2948 // class subobjects. These cases may need to be revisited depending on the
2949 // resolution of the relevant core issue.
2951 }
2952
2953 /// Determine whether a field initialization may overlap some other object.
2955
2956 /// Determine whether a base class initialization may overlap some other
2957 /// object.
2959 const CXXRecordDecl *BaseRD,
2960 bool IsVirtual);
2961
2962 /// Emit an aggregate assignment.
2964 bool IsVolatile = hasVolatileMember(EltTy);
2965 EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
2966 }
2967
2969 AggValueSlot::Overlap_t MayOverlap) {
2970 EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
2971 }
2972
2973 /// EmitAggregateCopy - Emit an aggregate copy.
2974 ///
2975 /// \param isVolatile \c true iff either the source or the destination is
2976 /// volatile.
2977 /// \param MayOverlap Whether the tail padding of the destination might be
2978 /// occupied by some other object. More efficient code can often be
2979 /// generated if not.
2981 AggValueSlot::Overlap_t MayOverlap,
2982 bool isVolatile = false);
2983
2984 /// GetAddrOfLocalVar - Return the address of a local variable.
2986 auto it = LocalDeclMap.find(VD);
2987 assert(it != LocalDeclMap.end() &&
2988 "Invalid argument to GetAddrOfLocalVar(), no decl!");
2989 return it->second;
2990 }
2991
2992 /// Given an opaque value expression, return its LValue mapping if it exists,
2993 /// otherwise create one.
2995
2996 /// Given an opaque value expression, return its RValue mapping if it exists,
2997 /// otherwise create one.
2999
3000 /// Get the index of the current ArrayInitLoopExpr, if any.
// Returns the stored index as-is; may be null ("if any", per the comment
// above) when no ArrayInitLoopExpr is currently being emitted.
3001 llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
3002
3003 /// getAccessedFieldNo - Given an encoded value and a result number, return
3004 /// the input field number being accessed.
3005 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
3006
3007 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
3008 llvm::BasicBlock *GetIndirectGotoBlock();
3009
3010 /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
3011 static bool IsWrappedCXXThis(const Expr *E);
3012
3013 /// EmitNullInitialization - Generate code to set a value of the given type to
3014 /// null. If the type contains data member pointers, they will be initialized
3015 /// to -1 in accordance with the Itanium C++ ABI.
3017
3018 /// Emits a call to an LLVM variable-argument intrinsic, either
3019 /// \c llvm.va_start or \c llvm.va_end.
3020 /// \param ArgValue A reference to the \c va_list as emitted by either
3021 /// \c EmitVAListRef or \c EmitMSVAListRef.
3022 /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
3023 /// calls \c llvm.va_end.
3024 llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
3025
3026 /// Generate code to get an argument from the passed in pointer
3027 /// and update it accordingly.
3028 /// \param VE The \c VAArgExpr for which to generate code.
3029 /// \param VAListAddr Receives a reference to the \c va_list as emitted by
3030 /// either \c EmitVAListRef or \c EmitMSVAListRef.
3031 /// \returns A pointer to the argument.
3032 // FIXME: We should be able to get rid of this method and use the va_arg
3033 // instruction in LLVM instead once it works well enough.
3036
3037 /// emitArrayLength - Compute the length of an array, even if it's a
3038 /// VLA, and drill down to the base element type.
3040 QualType &baseType,
3041 Address &addr);
3042
3043 /// EmitVLASize - Capture all the sizes for the VLA expressions in
3044 /// the given variably-modified type and store them in the VLASizeMap.
3045 ///
3046 /// This function can be called with a null (unreachable) insert point.
3048
3050 llvm::Value *NumElts;
3052
3053 VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
3054 };
3055
3056 /// Return the number of elements for a single dimension
3057 /// for the given array type.
3060
3061 /// Returns an LLVM value that corresponds to the size,
3062 /// in non-variably-sized elements, of a variable length array type,
3063 /// plus that largest non-variably-sized element type. Assumes that
3064 /// the type has already been emitted with EmitVariablyModifiedType.
3067
3068 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
3069 /// generating code for a C++ member function.
3070 llvm::Value *LoadCXXThis() {
// Asserts a 'this' value was set up for the current function; per the
// comment above, only valid while emitting a C++ member function.
3071 assert(CXXThisValue && "no 'this' value for this function");
3072 return CXXThisValue;
3073 }
3075
3076 /// LoadCXXVTT - Load the VTT parameter for base constructors/destructors of
3077 /// classes with virtual bases.
3078 // FIXME: Every place that calls LoadCXXVTT is something
3079 // that needs to be abstracted properly.
3080 llvm::Value *LoadCXXVTT() {
// The VTT arrives as the structor's implicit parameter; asserts it is
// present (see the doc comment above: only base constructors/destructors
// dealing with virtual bases have one).
3081 assert(CXXStructorImplicitParamValue && "no VTT value for this function");
3082 return CXXStructorImplicitParamValue;
3083 }
3084
3085 /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
3086 /// complete class to the given direct base.
3087 Address
3089 const CXXRecordDecl *Derived,
3090 const CXXRecordDecl *Base,
3091 bool BaseIsVirtual);
3092
3093 static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
3094
3095 /// GetAddressOfBaseClass - This function will add the necessary delta to the
3096 /// load of 'this' and returns address of the base class.
3098 const CXXRecordDecl *Derived,
3101 bool NullCheckValue, SourceLocation Loc);
3102
3104 const CXXRecordDecl *Derived,
3107 bool NullCheckValue);
3108
3109 /// GetVTTParameter - Return the VTT parameter that should be passed to a
3110 /// base constructor/destructor with virtual bases.
3111 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
3112 /// to ItaniumCXXABI.cpp together with all the references to VTT.
3113 llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
3114 bool Delegating);
3115
3117 CXXCtorType CtorType,
3118 const FunctionArgList &Args,
3120 // It's important not to confuse this and the previous function. Delegating
3121 // constructors are the C++0x feature. The constructor delegate optimization
3122 // is used to reduce duplication in the base and complete constructors where
3123 // they are substantially the same.
3125 const FunctionArgList &Args);
3126
3127 /// Emit a call to an inheriting constructor (that is, one that invokes a
3128 /// constructor inherited from a base class) by inlining its definition. This
3129 /// is necessary if the ABI does not support forwarding the arguments to the
3130 /// base class constructor (because they're variadic or similar).
3132 CXXCtorType CtorType,
3133 bool ForVirtualBase,
3134 bool Delegating,
3135 CallArgList &Args);
3136
3137 /// Emit a call to a constructor inherited from a base class, passing the
3138 /// current constructor's arguments along unmodified (without even making
3139 /// a copy).
3141 bool ForVirtualBase, Address This,
3142 bool InheritedFromVBase,
3144
3146 bool ForVirtualBase, bool Delegating,
3147 AggValueSlot ThisAVS, const CXXConstructExpr *E);
3148
3150 bool ForVirtualBase, bool Delegating,
3151 Address This, CallArgList &Args,
3153 SourceLocation Loc, bool NewPointerIsChecked);
3154
3155 /// Emit assumption load for all bases. Requires to be called only on
3156 /// most-derived class and not under construction of the object.
3158
3159 /// Emit assumption that vptr load == global vtable.
3160 void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
3161
3163 Address This, Address Src,
3164 const CXXConstructExpr *E);
3165
3167 const ArrayType *ArrayTy,
3168 Address ArrayPtr,
3169 const CXXConstructExpr *E,
3170 bool NewPointerIsChecked,
3171 bool ZeroInitialization = false);
3172
3174 llvm::Value *NumElements,
3175 Address ArrayPtr,
3176 const CXXConstructExpr *E,
3177 bool NewPointerIsChecked,
3178 bool ZeroInitialization = false);
3179
3181
3183 bool ForVirtualBase, bool Delegating, Address This,
3184 QualType ThisTy);
3185
3187 llvm::Type *ElementTy, Address NewPtr,
3188 llvm::Value *NumElements,
3189 llvm::Value *AllocSizeWithoutCookie);
3190
3191 void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
3192 Address Ptr);
3193
3198
3199 llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
3200 void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
3201
3202 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
3204
3205 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
3206 QualType DeleteTy, llvm::Value *NumElements = nullptr,
3207 CharUnits CookieSize = CharUnits());
3208
3210 const CallExpr *TheCallExpr, bool IsDelete);
3211
3212 llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
3213 llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
3215
3216 /// Situations in which we might emit a check for the suitability of a
3217 /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
3218 /// compiler-rt.
3220 /// Checking the operand of a load. Must be suitably sized and aligned.
3222 /// Checking the destination of a store. Must be suitably sized and aligned.
3224 /// Checking the bound value in a reference binding. Must be suitably sized
3225 /// and aligned, but is not required to refer to an object (until the
3226 /// reference is used), per core issue 453.
3228 /// Checking the object expression in a non-static data member access. Must
3229 /// be an object within its lifetime.
3231 /// Checking the 'this' pointer for a call to a non-static member function.
3232 /// Must be an object within its lifetime.
3234 /// Checking the 'this' pointer for a constructor call.
3236 /// Checking the operand of a static_cast to a derived pointer type. Must be
3237 /// null or an object within its lifetime.
3239 /// Checking the operand of a static_cast to a derived reference type. Must
3240 /// be an object within its lifetime.
3242 /// Checking the operand of a cast to a base object. Must be suitably sized
3243 /// and aligned.
3245 /// Checking the operand of a cast to a virtual base object. Must be an
3246 /// object within its lifetime.
3248 /// Checking the value assigned to a _Nonnull pointer. Must not be null.
3250 /// Checking the operand of a dynamic_cast or a typeid expression. Must be
3251 /// null or an object within its lifetime.
3254
3255 /// Determine whether the pointer type check \p TCK permits null pointers.
3257
3258 /// Determine whether the pointer type check \p TCK requires a vptr check.
3260
3261 /// Whether any type-checking sanitizers are enabled. If \c false,
3262 /// calls to EmitTypeCheck can be skipped.
3264
3266 QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
3267 llvm::Value *ArraySize = nullptr) {
3269 return;
3270 EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
3271 SkippedChecks, ArraySize);
3272 }
3273
3275 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3276 SanitizerSet SkippedChecks = SanitizerSet(),
3277 llvm::Value *ArraySize = nullptr) {
3279 return;
3280 EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
3281 SkippedChecks, ArraySize);
3282 }
3283
3284 /// Emit a check that \p V is the address of storage of the
3285 /// appropriate size and alignment for an object of type \p Type
3286 /// (or if ArraySize is provided, for an array of that bound).
3288 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3289 SanitizerSet SkippedChecks = SanitizerSet(),
3290 llvm::Value *ArraySize = nullptr);
3291
3292 /// Emit a check that \p Base points into an array object, which
3293 /// we can access at index \p Index. \p Accessed should be \c false if
3294 /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
3295 void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
3296 QualType IndexType, bool Accessed);
3297 void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
3298 llvm::Value *Index, QualType IndexType,
3299 QualType IndexedType, bool Accessed);
3300
3301 // Find a struct's flexible array member and get its offset. It may be
3302 // embedded inside multiple sub-structs, but must still be the last field.
3303 const FieldDecl *
3305 const FieldDecl *FAMDecl,
3306 uint64_t &Offset);
3307
3308 /// Build an expression accessing the "counted_by" field.
3310 const FieldDecl *FAMDecl,
3311 const FieldDecl *CountDecl);
3312
3314 bool isInc, bool isPre);
3316 bool isInc, bool isPre);
3317
3318 /// Converts Location to a DebugLoc, if debug information is enabled.
3319 llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
3320
3321 /// Get the record field index as represented in debug info.
3322 unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);
3323
3324
3325 //===--------------------------------------------------------------------===//
3326 // Declaration Emission
3327 //===--------------------------------------------------------------------===//
3328
3329 /// EmitDecl - Emit a declaration.
3330 ///
3331 /// This function can be called with a null (unreachable) insert point.
3332 void EmitDecl(const Decl &D);
3333
3334 /// EmitVarDecl - Emit a local variable declaration.
3335 ///
3336 /// This function can be called with a null (unreachable) insert point.
3337 void EmitVarDecl(const VarDecl &D);
3338
3339 void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
3340 bool capturedByInit);
3341
3343 llvm::Value *Address);
3344
3345 /// Determine whether the given initializer is trivial in the sense
3346 /// that it requires no code to be generated.
3348
3349 /// EmitAutoVarDecl - Emit an auto variable declaration.
3350 ///
3351 /// This function can be called with a null (unreachable) insert point.
3353
3355 friend class CodeGenFunction;
3356
3357 const VarDecl *Variable;
3358
3359 /// The address of the alloca for languages with explicit address space
3360 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
3361 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
3362 /// as a global constant.
3363 Address Addr;
3364
3365 llvm::Value *NRVOFlag;
3366
3367 /// True if the variable is a __block variable that is captured by an
3368 /// escaping block.
3369 bool IsEscapingByRef;
3370
3371 /// True if the variable is of aggregate type and has a constant
3372 /// initializer.
3373 bool IsConstantAggregate;
3374
3375 /// Non-null if we should use lifetime annotations.
3376 llvm::Value *SizeForLifetimeMarkers;
3377
3378 /// Address with original alloca instruction. Invalid if the variable was
3379 /// emitted as a global constant.
3380 RawAddress AllocaAddr;
3381
3382 struct Invalid {};
3383 AutoVarEmission(Invalid)
3384 : Variable(nullptr), Addr(Address::invalid()),
3385 AllocaAddr(RawAddress::invalid()) {}
3386
3387 AutoVarEmission(const VarDecl &variable)
3388 : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
3389 IsEscapingByRef(false), IsConstantAggregate(false),
3390 SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
3391
3392 bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
3393
3394 public:
3395 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
3396
3397 bool useLifetimeMarkers() const {
3398 return SizeForLifetimeMarkers != nullptr;
3399 }
3400 llvm::Value *getSizeForLifetimeMarkers() const {
// Precondition: callers must check useLifetimeMarkers() first;
// the size is only recorded when lifetime markers are in use.
3401 assert(useLifetimeMarkers());
3402 return SizeForLifetimeMarkers;
3403 }
3404
3405 /// Returns the raw, allocated address, which is not necessarily
3406 /// the address of the object itself. It is casted to default
3407 /// address space for address space agnostic languages.
3409 return Addr;
3410 }
3411
3412 /// Returns the address for the original alloca instruction.
3413 RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
3414
3415 /// Returns the address of the object within this declaration.
3416 /// Note that this does not chase the forwarding pointer for
3417 /// __block decls.
3419 if (!IsEscapingByRef) return Addr;
3420
3421 return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
3422 }
3423 };
3425 void EmitAutoVarInit(const AutoVarEmission &emission);
3428 QualType::DestructionKind dtorKind);
3429
3430 /// Emits the alloca and debug information for the size expressions for each
3431 /// dimension of an array. It registers the association of its (1-dimensional)
3432 /// QualTypes and size expression's debug node, so that CGDebugInfo can
3433 /// reference this node when creating the DISubrange object to describe the
3434 /// array types.
3436 const VarDecl &D,
3437 bool EmitDebugInfo);
3438
3440 llvm::GlobalValue::LinkageTypes Linkage);
3441
3443 union {
3445 llvm::Value *Value;
3446 };
3447
3448 bool IsIndirect;
3449
3450 ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
3451 ParamValue(Address A) : Addr(A), IsIndirect(true) {}
3452
3453 public:
3454 static ParamValue forDirect(llvm::Value *value) {
3455 return ParamValue(value);
3456 }
3458 assert(!addr.getAlignment().isZero());
3459 return ParamValue(addr);
3460 }
3461
3462 bool isIndirect() const { return IsIndirect; }
3463 llvm::Value *getAnyValue() const {
// Direct parameters carry their value directly.
3464 if (!isIndirect())
3465 return Value;
// Indirect parameters are represented by an Address; hand back its base
// pointer. An address with an offset is not expected here.
3466 assert(!Addr.hasOffset() && "unexpected offset");
3467 return Addr.getBasePointer();
3468 }
3469
3470 llvm::Value *getDirectValue() const {
// Only valid for direct (non-indirect) parameter values; asserts otherwise.
3471 assert(!isIndirect());
3472 return Value;
3473 }
3474
3476 assert(isIndirect());
3477 return Addr;
3478 }
3479 };
3480
3481 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
3482 void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
3483
3484 /// protectFromPeepholes - Protect a value that we're intending to
3485 /// store to the side, but which will probably be used later, from
3486 /// aggressive peepholing optimizations that might delete it.
3487 ///
3488 /// Pass the result to unprotectFromPeepholes to declare that
3489 /// protection is no longer required.
3490 ///
3491 /// There's no particular reason why this shouldn't apply to
3492 /// l-values, it's just that no existing peepholes work on pointers.
3495
3496 void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
3498 SourceLocation AssumptionLoc,
3499 llvm::Value *Alignment,
3500 llvm::Value *OffsetValue,
3501 llvm::Value *TheCheck,
3502 llvm::Instruction *Assumption);
3503
3504 void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
3505 SourceLocation Loc, SourceLocation AssumptionLoc,
3506 llvm::Value *Alignment,
3507 llvm::Value *OffsetValue = nullptr);
3508
3509 void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
3510 SourceLocation AssumptionLoc,
3511 llvm::Value *Alignment,
3512 llvm::Value *OffsetValue = nullptr);
3513
3514 //===--------------------------------------------------------------------===//
3515 // Statement Emission
3516 //===--------------------------------------------------------------------===//
3517
3518 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
3519 void EmitStopPoint(const Stmt *S);
3520
3521 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
3522 /// this function even if there is no current insertion point.
3523 ///
3524 /// This function may clear the current insertion point; callers should use
3525 /// EnsureInsertPoint if they wish to subsequently generate code without first
3526 /// calling EmitBlock, EmitBranch, or EmitStmt.
3527 void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = std::nullopt);
3528
3529 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
3530 /// necessarily require an insertion point or debug information; typically
3531 /// because the statement amounts to a jump or a container of other
3532 /// statements.
3533 ///
3534 /// \return True if the statement was handled.
3536
3537 Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
3540 bool GetLast = false,
3541 AggValueSlot AVS =
3543
3544 /// EmitLabel - Emit the block for the given label. It is legal to call this
3545 /// function even if there is no current insertion point.
3546 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
3547
3548 void EmitLabelStmt(const LabelStmt &S);
3550 void EmitGotoStmt(const GotoStmt &S);
3552 void EmitIfStmt(const IfStmt &S);
3553
3555 ArrayRef<const Attr *> Attrs = std::nullopt);
3556 void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = std::nullopt);
3557 void EmitForStmt(const ForStmt &S,
3558 ArrayRef<const Attr *> Attrs = std::nullopt);
3560 void EmitDeclStmt(const DeclStmt &S);
3561 void EmitBreakStmt(const BreakStmt &S);
3567 void EmitAsmStmt(const AsmStmt &S);
3568
3574
3579 bool ignoreResult = false);
3583 bool ignoreResult = false);
3585 RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
3586
3587 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3588 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3589
3595 void VolatilizeTryBlocks(llvm::BasicBlock *BB,
3597
3599 llvm::Function *FinallyFunc);
3601 const Stmt *OutlinedStmt);
3602
3604 const SEHExceptStmt &Except);
3605
3607 const SEHFinallyStmt &Finally);
3608
3610 llvm::Value *ParentFP,
3611 llvm::Value *EntryEBP);
3612 llvm::Value *EmitSEHExceptionCode();
3613 llvm::Value *EmitSEHExceptionInfo();
3615
3616 /// Emit simple code for OpenMP directives in Simd-only mode.
3618
3619 /// Scan the outlined statement for captures from the parent function. For
3620 /// each capture, mark the capture as escaped and emit a call to
3621 /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
3623 bool IsFilter);
3624
3625 /// Recovers the address of a local in a parent function. ParentVar is the
3626 /// address of the variable used in the immediate parent function. It can
3627 /// either be an alloca or a call to llvm.localrecover if there are nested
3628 /// outlined functions. ParentFP is the frame pointer of the outermost parent
3629 /// frame.
3631 Address ParentVar,
3632 llvm::Value *ParentFP);
3633
3635 ArrayRef<const Attr *> Attrs = std::nullopt);
3636
3637 /// Controls insertion of cancellation exit blocks in worksharing constructs.
3639 CodeGenFunction &CGF;
3640
3641 public:
3643 bool HasCancel)
3644 : CGF(CGF) {
3645 CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
3646 }
3647 ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
3648 };
3649
3650 /// Returns calculated size of the specified type.
3651 llvm::Value *getTypeSize(QualType Ty);
3659 SmallVectorImpl<llvm::Value *> &CapturedVars);
3660 void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
3662 /// Perform element by element copying of arrays with type \a
3663 /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
3664 /// generated by \a CopyGen.
3665 ///
3666 /// \param DestAddr Address of the destination array.
3667 /// \param SrcAddr Address of the source array.
3668 /// \param OriginalType Type of destination and source arrays.
3669 /// \param CopyGen Copying procedure that copies value of single array element
3670 /// to another single array element.
3672 Address DestAddr, Address SrcAddr, QualType OriginalType,
3673 const llvm::function_ref<void(Address, Address)> CopyGen);
3674 /// Emit proper copying of data from one variable to another.
3675 ///
3676 /// \param OriginalType Original type of the copied variables.
3677 /// \param DestAddr Destination address.
3678 /// \param SrcAddr Source address.
3679 /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
3680 /// type of the base array element).
3681 /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
3682 /// the base array element).
3683 /// \param Copy Actual copying expression for copying data from \a SrcVD to \a
3684 /// DestVD.
3685 void EmitOMPCopy(QualType OriginalType,
3686 Address DestAddr, Address SrcAddr,
3687 const VarDecl *DestVD, const VarDecl *SrcVD,
3688 const Expr *Copy);
3689 /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
3690 /// \a X = \a E \a BO \a E.
3691 ///
3692 /// \param X Value to be updated.
3693 /// \param E Update value.
3694 /// \param BO Binary operation for update operation.
3695 /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3696 /// expression, false otherwise.
3697 /// \param AO Atomic ordering of the generated atomic instructions.
3698 /// \param CommonGen Code generator for complex expressions that cannot be
3699 /// expressed through atomicrmw instruction.
3700 /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
3701 /// generated, <false, RValue::get(nullptr)> otherwise.
3702 std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3703 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3704 llvm::AtomicOrdering AO, SourceLocation Loc,
3705 const llvm::function_ref<RValue(RValue)> CommonGen);
3707 OMPPrivateScope &PrivateScope);
3709 OMPPrivateScope &PrivateScope);
3711 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
3712 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3713 CaptureDeviceAddrMap);
3715 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
3716 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3717 CaptureDeviceAddrMap);
3718 /// Emit code for copyin clause in \a D directive. The next code is
3719 /// generated at the start of outlined functions for directives:
3720 /// \code
3721 /// threadprivate_var1 = master_threadprivate_var1;
3722 /// operator=(threadprivate_var2, master_threadprivate_var2);
3723 /// ...
3724 /// __kmpc_barrier(&loc, global_tid);
3725 /// \endcode
3726 ///
3727 /// \param D OpenMP directive possibly with 'copyin' clause(s).
3728 /// \returns true if at least one copyin variable is found, false otherwise.
3730 /// Emit initial code for lastprivate variables. If some variable is
3731 /// not also firstprivate, then the default initialization is used. Otherwise
3732 /// initialization of this variable is performed by EmitOMPFirstprivateClause
3733 /// method.
3734 ///
3735 /// \param D Directive that may have 'lastprivate' directives.
3736 /// \param PrivateScope Private scope for capturing lastprivate variables for
3737 /// proper codegen in internal captured statement.
3738 ///
3739 /// \returns true if there is at least one lastprivate variable, false
3740 /// otherwise.
3742 OMPPrivateScope &PrivateScope);
3743 /// Emit final copying of lastprivate values to original variables at
3744 /// the end of the worksharing or simd directive.
3745 ///
3746 /// \param D Directive that has at least one 'lastprivate' directives.
3747 /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3748 /// it is the last iteration of the loop code in associated directive, or to
3749 /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3751 bool NoFinals,
3752 llvm::Value *IsLastIterCond = nullptr);
3753 /// Emit initial code for linear clauses.
3755 CodeGenFunction::OMPPrivateScope &PrivateScope);
3756 /// Emit final code for linear clauses.
3757 /// \param CondGen Optional conditional code for final part of codegen for
3758 /// linear clause.
3760 const OMPLoopDirective &D,
3761 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3762 /// Emit initial code for reduction variables. Creates reduction copies
3763 /// and initializes them with the values according to OpenMP standard.
3764 ///
3765 /// \param D Directive (possibly) with the 'reduction' clause.
3766 /// \param PrivateScope Private scope for capturing reduction variables for
3767 /// proper codegen in internal captured statement.
3768 ///
3770 OMPPrivateScope &PrivateScope,
3771 bool ForInscan = false);
3772 /// Emit final update of reduction values to original variables at
3773 /// the end of the directive.
3774 ///
3775 /// \param D Directive that has at least one 'reduction' directives.
3776 /// \param ReductionKind The kind of reduction to perform.
3778 const OpenMPDirectiveKind ReductionKind);
3779 /// Emit initial code for linear variables. Creates private copies
3780 /// and initializes them with the values according to OpenMP standard.
3781 ///
3782 /// \param D Directive (possibly) with the 'linear' clause.
3783 /// \return true if at least one linear variable is found that should be
3784 /// initialized with the value of the original variable, false otherwise.
3786
3787 typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3788 llvm::Function * /*OutlinedFn*/,
3789 const OMPTaskDataTy & /*Data*/)>
3792 const OpenMPDirectiveKind CapturedRegion,
3793 const RegionCodeGenTy &BodyGen,
3794 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3801 explicit OMPTargetDataInfo() = default;
3804 unsigned NumberOfTargetItems)
3808 };
3810 const RegionCodeGenTy &BodyGen,
3811 OMPTargetDataInfo &InputInfo);
3814 CodeGenFunction &CGF,
3815 const CapturedStmt *CS,
3853 void
3856 void
3863 void
3879 void
3904
3905 /// Emit device code for the target directive.
3907 StringRef ParentName,
3908 const OMPTargetDirective &S);
3909 static void
3912 /// Emit device code for the target parallel for directive.
3914 CodeGenModule &CGM, StringRef ParentName,
3916 /// Emit device code for the target parallel for simd directive.
3918 CodeGenModule &CGM, StringRef ParentName,
3920 /// Emit device code for the target teams directive.
3921 static void
3923 const OMPTargetTeamsDirective &S);
3924 /// Emit device code for the target teams distribute directive.
3926 CodeGenModule &CGM, StringRef ParentName,
3928 /// Emit device code for the target teams distribute simd directive.
3930 CodeGenModule &CGM, StringRef ParentName,
3932 /// Emit device code for the target simd directive.
3934 StringRef ParentName,
3935 const OMPTargetSimdDirective &S);
3936 /// Emit device code for the target teams distribute parallel for simd
3937 /// directive.
3939 CodeGenModule &CGM, StringRef ParentName,
3941
3942 /// Emit device code for the target teams loop directive.
3944 CodeGenModule &CGM, StringRef ParentName,
3946
3947 /// Emit device code for the target parallel loop directive.
3949 CodeGenModule &CGM, StringRef ParentName,
3951
3953 CodeGenModule &CGM, StringRef ParentName,
3955
3956 /// Emit the Stmt \p S and return its topmost canonical loop, if any.
3957 /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
3958 /// future it is meant to be the number of loops expected in the loop nests
3959 /// (usually specified by the "collapse" clause) that are collapsed to a
3960 /// single loop by this function.
3961 llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
3962 int Depth);
3963
3964 /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
3966
3967 /// Emit inner loop of the worksharing/simd construct.
3968 ///
3969 /// \param S Directive, for which the inner loop must be emitted.
3970 /// \param RequiresCleanup true, if directive has some associated private
3971 /// variables.
  /// \param LoopCond Boolean condition for loop continuation.
3973 /// \param IncExpr Increment expression for loop control variable.
3974 /// \param BodyGen Generator for the inner body of the inner loop.
  /// \param PostIncGen Generator for post-increment code (required for ordered
  /// loop directives).
3978 const OMPExecutableDirective &S, bool RequiresCleanup,
3979 const Expr *LoopCond, const Expr *IncExpr,
3980 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
3981 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
3982
3984 /// Emit initial code for loop counters of loop-based directives.
3986 OMPPrivateScope &LoopScope);
3987
3988 /// Helper for the OpenMP loop directives.
3990
3991 /// Emit code for the worksharing loop-based directive.
3992 /// \return true, if this construct has any lastprivate clause, false -
3993 /// otherwise.
3995 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3996 const CodeGenDispatchBoundsTy &CGDispatchBounds);
3997
3998 /// Emit code for the distribute loop-based directive.
4000 const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
4001
4002 /// Helpers for the OpenMP loop directives.
4005 const OMPLoopDirective &D,
4006 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
4007
4008 /// Emits the lvalue for the expression with possibly captured variable.
4010
4011private:
  /// Helpers for blocks.

  /// Emit the literal value for the block described by \p Info and return the
  /// llvm::Value representing it. (The actual block-literal layout is decided
  /// by the definition, which is not visible in this header.)
  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
4014
4015 /// struct with the values to be passed to the OpenMP loop-related functions
4016 struct OMPLoopArguments {
4017 /// loop lower bound
4019 /// loop upper bound
4021 /// loop stride
4023 /// isLastIteration argument for runtime functions
4025 /// Chunk value generated by sema
4026 llvm::Value *Chunk = nullptr;
4027 /// EnsureUpperBound
4028 Expr *EUB = nullptr;
4029 /// IncrementExpression
4030 Expr *IncExpr = nullptr;
4031 /// Loop initialization
4032 Expr *Init = nullptr;
4033 /// Loop exit condition
4034 Expr *Cond = nullptr;
4035 /// Update of LB after a whole chunk has been executed
4036 Expr *NextLB = nullptr;
4037 /// Update of UB after a whole chunk has been executed
4038 Expr *NextUB = nullptr;
4039 /// Distinguish between the for distribute and sections
4040 OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
4041 OMPLoopArguments() = default;
4042 OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
4043 llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
4044 Expr *IncExpr = nullptr, Expr *Init = nullptr,
4045 Expr *Cond = nullptr, Expr *NextLB = nullptr,
4046 Expr *NextUB = nullptr)
4047 : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
4048 IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
4049 NextUB(NextUB) {}
4050 };
  /// Emit the outer (scheduling) loop for an OpenMP loop-based directive.
  /// \param DynamicOrOrdered presumably selects the dynamic/ordered runtime
  /// dispatch protocol rather than static chunking — TODO confirm against the
  /// definition in CGStmtOpenMP.cpp.
  /// \param S the loop directive being emitted.
  /// \param LoopScope scope holding the directive's private variables.
  /// \param LoopArgs bounds/stride/chunk values handed to the OpenMP runtime
  /// (see OMPLoopArguments).
  /// \param CodeGenLoop callback that emits the inner loop.
  /// \param CodeGenOrdered callback for ordered handling — assumption from the
  /// parameter name; verify at the call sites.
  void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
                        const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
                        const OMPLoopArguments &LoopArgs,
                        const CodeGenLoopTy &CodeGenLoop,
                        const CodeGenOrderedTy &CodeGenOrdered);
  /// Emit the outer loop for a worksharing 'for' directive with the given
  /// schedule kind.
  /// \param Ordered presumably true when the directive carries an 'ordered'
  /// clause — TODO confirm at the call sites.
  /// \param LoopArgs bounds/stride/chunk values for the runtime calls.
  /// \param CGDispatchBounds computes the bounds used for runtime dispatch.
  void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
                           bool IsMonotonic, const OMPLoopDirective &S,
                           OMPPrivateScope &LoopScope, bool Ordered,
                           const OMPLoopArguments &LoopArgs,
                           const CodeGenDispatchBoundsTy &CGDispatchBounds);
  /// Emit the outer loop for a 'distribute' directive using the given
  /// dist_schedule kind.
  /// \param LoopArgs bounds/stride/chunk values for the runtime calls.
  /// \param CodeGenLoopContent callback that emits the distributed loop body.
  void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
                                  const OMPLoopDirective &S,
                                  OMPPrivateScope &LoopScope,
                                  const OMPLoopArguments &LoopArgs,
                                  const CodeGenLoopTy &CodeGenLoopContent);
  /// Emit code for sections directive.
  /// \param S the directive to lower; typed as OMPExecutableDirective, so it
  /// presumably also serves the combined 'sections' forms — verify at callers.
  void EmitSections(const OMPExecutableDirective &S);
4068
4069public:
4070 //===--------------------------------------------------------------------===//
4071 // OpenACC Emission
4072 //===--------------------------------------------------------------------===//
4074 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4075 // simply emitting its structured block, but in the future we will implement
4076 // some sort of IR.
4077 EmitStmt(S.getStructuredBlock());
4078 }
4079
4081 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4082 // simply emitting its loop, but in the future we will implement
4083 // some sort of IR.
4084 EmitStmt(S.getLoop());
4085 }
4086
4087 //===--------------------------------------------------------------------===//
4088 // LValue Expression Emission
4089 //===--------------------------------------------------------------------===//
4090
4091 /// Create a check that a scalar RValue is non-null.
4093
4094 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
4096
4097 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
4098 /// and issue an ErrorUnsupported style diagnostic (using the
4099 /// provided Name).
4101 const char *Name);
4102
4103 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
4104 /// an ErrorUnsupported style diagnostic (using the provided Name).
4106 const char *Name);
4107
4108 /// EmitLValue - Emit code to compute a designator that specifies the location
4109 /// of the expression.
4110 ///
4111 /// This can return one of two things: a simple address or a bitfield
4112 /// reference. In either case, the LLVM Value* in the LValue structure is
4113 /// guaranteed to be an LLVM pointer type.
4114 ///
4115 /// If this returns a bitfield reference, nothing about the pointee type of
4116 /// the LLVM value is known: For example, it may not be a pointer to an
4117 /// integer.
4118 ///
4119 /// If this returns a normal address, and if the lvalue's C type is fixed
4120 /// size, this method guarantees that the returned pointer type will point to
4121 /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
4122 /// variable length type, this is not possible.
4123 ///
4125 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
4126
4127private:
  /// Worker for the public EmitLValue entry point — assumption from the name;
  /// \p IsKnownNonNull propagates whether the resulting address is already
  /// known to be non-null.
  LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);
4129