clang 19.0.0git
CodeGenFunction.h
Go to the documentation of this file.
1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the internal per-function state used for llvm translation.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15
16#include "CGBuilder.h"
17#include "CGDebugInfo.h"
18#include "CGLoopInfo.h"
19#include "CGValue.h"
20#include "CodeGenModule.h"
21#include "CodeGenPGO.h"
22#include "EHScopeStack.h"
23#include "VarBypassDetector.h"
24#include "clang/AST/CharUnits.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
31#include "clang/AST/Type.h"
32#include "clang/Basic/ABI.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
39#include "llvm/ADT/MapVector.h"
40#include "llvm/ADT/SmallVector.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/ValueHandle.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Transforms/Utils/SanitizerStats.h"
46#include <optional>
47
48namespace llvm {
49class BasicBlock;
50class LLVMContext;
51class MDNode;
52class SwitchInst;
53class Twine;
54class Value;
55class CanonicalLoopInfo;
56}
57
58namespace clang {
59class ASTContext;
60class CXXDestructorDecl;
61class CXXForRangeStmt;
62class CXXTryStmt;
63class Decl;
64class LabelDecl;
65class FunctionDecl;
66class FunctionProtoType;
67class LabelStmt;
68class ObjCContainerDecl;
69class ObjCInterfaceDecl;
70class ObjCIvarDecl;
71class ObjCMethodDecl;
72class ObjCImplementationDecl;
73class ObjCPropertyImplDecl;
74class TargetInfo;
75class VarDecl;
76class ObjCForCollectionStmt;
77class ObjCAtTryStmt;
78class ObjCAtThrowStmt;
79class ObjCAtSynchronizedStmt;
80class ObjCAutoreleasePoolStmt;
81class OMPUseDevicePtrClause;
82class OMPUseDeviceAddrClause;
83class SVETypeFlags;
84class OMPExecutableDirective;
85
86namespace analyze_os_log {
87class OSLogBufferLayout;
88}
89
90namespace CodeGen {
91class CodeGenTypes;
92class CGCallee;
93class CGFunctionInfo;
94class CGBlockInfo;
95class CGCXXABI;
96class BlockByrefHelpers;
97class BlockByrefInfo;
98class BlockFieldFlags;
99class RegionCodeGenTy;
100class TargetCodeGenInfo;
101struct OMPTaskDataTy;
102struct CGCoroData;
103
104/// The kind of evaluation to perform on values of a particular
105/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
106/// CGExprAgg?
107///
108/// TODO: should vectors maybe be split out into their own thing?
114
115#define LIST_SANITIZER_CHECKS \
116 SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
117 SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
118 SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
119 SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
120 SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
121 SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
122 SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
123 SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
124 SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
125 SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
126 SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
127 SANITIZER_CHECK(MissingReturn, missing_return, 0) \
128 SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
129 SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
130 SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
131 SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
132 SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
133 SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
134 SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
135 SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
136 SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
137 SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
138 SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
139 SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
140 SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
141
143#define SANITIZER_CHECK(Enum, Name, Version) Enum,
145#undef SANITIZER_CHECK
147
148/// Helper class with most of the code for saving a value for a
149/// conditional expression cleanup.
151 typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
152
153 /// Answer whether the given value needs extra work to be saved.
154 static bool needsSaving(llvm::Value *value) {
155 if (!value)
156 return false;
157
158 // If it's not an instruction, we don't need to save.
159 if (!isa<llvm::Instruction>(value)) return false;
160
161 // If it's an instruction in the entry block, we don't need to save.
162 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
163 return (block != &block->getParent()->getEntryBlock());
164 }
165
166 static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
167 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
168};
169
170/// A partial specialization of DominatingValue for llvm::Values that
171/// might be llvm::Instructions.
172template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
173 typedef T *type;
175 return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
176 }
177};
178
179/// A specialization of DominatingValue for Address.
180template <> struct DominatingValue<Address> {
181 typedef Address type;
182
183 struct saved_type {
185 llvm::Type *ElementType;
188 llvm::PointerType *EffectiveType;
189 };
190
191 static bool needsSaving(type value) {
194 return true;
195 return false;
196 }
197 static saved_type save(CodeGenFunction &CGF, type value) {
198 return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
199 value.getElementType(), value.getAlignment(),
200 DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
201 }
203 return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
204 value.ElementType, value.Alignment,
205 DominatingLLVMValue::restore(CGF, value.Offset));
206 }
207};
208
209/// A specialization of DominatingValue for RValue.
210template <> struct DominatingValue<RValue> {
211 typedef RValue type;
213 enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
214 AggregateAddress, ComplexAddress };
215 union {
216 struct {
218 } Vals;
220 };
221 LLVM_PREFERRED_TYPE(Kind)
222 unsigned K : 3;
223 unsigned IsVolatile : 1;
224
226 : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}
227
230 : Vals{Val1, Val2}, K(ComplexAddress) {}
231
233 bool IsVolatile, unsigned K)
234 : AggregateAddr(AggregateAddr), K(K) {}
235
236 public:
237 static bool needsSaving(RValue value);
240
241 // implementations in CGCleanup.cpp
242 };
243
244 static bool needsSaving(type value) {
245 return saved_type::needsSaving(value);
246 }
247 static saved_type save(CodeGenFunction &CGF, type value) {
248 return saved_type::save(CGF, value);
249 }
251 return value.restore(CGF);
252 }
253};
254
255/// CodeGenFunction - This class organizes the per-function state that is used
256/// while generating LLVM code.
258 CodeGenFunction(const CodeGenFunction &) = delete;
259 void operator=(const CodeGenFunction &) = delete;
260
261 friend class CGCXXABI;
262public:
263 /// A jump destination is an abstract label, branching to which may
264 /// require a jump out through normal cleanups.
265 struct JumpDest {
266 JumpDest() : Block(nullptr), Index(0) {}
267 JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
268 unsigned Index)
269 : Block(Block), ScopeDepth(Depth), Index(Index) {}
270
271 bool isValid() const { return Block != nullptr; }
272 llvm::BasicBlock *getBlock() const { return Block; }
273 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
274 unsigned getDestIndex() const { return Index; }
275
276 // This should be used cautiously.
278 ScopeDepth = depth;
279 }
280
281 private:
282 llvm::BasicBlock *Block;
284 unsigned Index;
285 };
286
287 CodeGenModule &CGM; // Per-module state.
289
290 // For EH/SEH outlined funclets, this field points to parent's CGF
292
293 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
296
297 // Stores variables for which we can't generate correct lifetime markers
298 // because of jumps.
300
301 /// List of recently emitted OMPCanonicalLoops.
302 ///
303 /// Since OMPCanonicalLoops are nested inside other statements (in particular
304 /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
305 /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
306 /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
307 /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
308 /// this stack when done. Entering a new loop requires clearing this list; it
309 /// either means we start parsing a new loop nest (in which case the previous
310 /// loop nest goes out of scope) or a second loop in the same level in which
311 /// case it would be ambiguous into which of the two (or more) loops the loop
312 /// nest would extend.
314
315 /// Stack to track the Logical Operator recursion nest for MC/DC.
317
318 /// Number of nested loop to be consumed by the last surrounding
319 /// loop-associated directive.
321
322 // CodeGen lambda for loops and support for ordered clause
323 typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
324 JumpDest)>
326 typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
327 const unsigned, const bool)>
329
330 // Codegen lambda for loop bounds in worksharing loop constructs
331 typedef llvm::function_ref<std::pair<LValue, LValue>(
334
335 // Codegen lambda for loop bounds in dispatch-based loop implementation
336 typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
338 Address UB)>
340
341 /// CGBuilder insert helper. This function is called after an
342 /// instruction is created using Builder.
343 void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
344 llvm::BasicBlock *BB,
345 llvm::BasicBlock::iterator InsertPt) const;
346
347 /// CurFuncDecl - Holds the Decl for the current outermost
348 /// non-closure context.
349 const Decl *CurFuncDecl = nullptr;
350 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
351 const Decl *CurCodeDecl = nullptr;
352 const CGFunctionInfo *CurFnInfo = nullptr;
354 llvm::Function *CurFn = nullptr;
355
356 /// Save Parameter Decl for coroutine.
358
359 // Holds coroutine data if the current function is a coroutine. We use a
360 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
361 // in this header.
362 struct CGCoroInfo {
    // Opaque per-coroutine state; held by unique_ptr so CGCoroData can
    // remain an incomplete type in this header (see comment above).
363 std::unique_ptr<CGCoroData> Data;
    // Presumably set while emitting a suspend expression's block —
    // TODO(review): confirm where this is toggled (CGCoroutine.cpp).
364 bool InSuspendBlock = false;
    // Ctor/dtor are defined out of line: they must live where CGCoroData
    // is complete, since ~unique_ptr<CGCoroData> needs the full type.
365 CGCoroInfo();
366 ~CGCoroInfo();
367 };
369
370 bool isCoroutine() const {
371 return CurCoro.Data != nullptr;
372 }
373
374 bool inSuspendBlock() const {
376 }
377
378 // Holds FramePtr for await_suspend wrapper generation,
379 // so that __builtin_coro_frame call can be lowered
380 // directly to value of its second argument
382 llvm::Value *FramePtr = nullptr;
383 };
385
386 // Generates wrapper function for `llvm.coro.await.suspend.*` intrinisics.
387 // It encapsulates SuspendExpr in a function, to separate it's body
388 // from the main coroutine to avoid miscompilations. Intrinisic
389 // is lowered to this function call in CoroSplit pass
390 // Function signature is:
391 // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
392 // where type is one of (void, i1, ptr)
393 llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
394 Twine const &SuspendPointName,
395 CoroutineSuspendExpr const &S);
396
397 /// CurGD - The GlobalDecl for the current function being compiled.
399
400 /// PrologueCleanupDepth - The cleanup depth enclosing all the
401 /// cleanups associated with the parameters.
403
404 /// ReturnBlock - Unified return block.
406
407 /// ReturnValue - The temporary alloca to hold the return
408 /// value. This is invalid iff the function has no return value.
410
411 /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
412 /// This is invalid if sret is not in use.
414
415 /// If a return statement is being visited, this holds the return statment's
416 /// result expression.
417 const Expr *RetExpr = nullptr;
418
419 /// Return true if a label was seen in the current scope.
421 if (CurLexicalScope)
422 return CurLexicalScope->hasLabels();
423 return !LabelMap.empty();
424 }
425
426 /// AllocaInsertPoint - This is an instruction in the entry block before which
427 /// we prefer to insert allocas.
428 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
429
430private:
431 /// PostAllocaInsertPt - This is a place in the prologue where code can be
432 /// inserted that will be dominated by all the static allocas. This helps
433 /// achieve two things:
434 /// 1. Contiguity of all static allocas (within the prologue) is maintained.
435 /// 2. All other prologue code (which are dominated by static allocas) do
436 /// appear in the source order immediately after all static allocas.
437 ///
438 /// PostAllocaInsertPt will be lazily created when it is *really* required.
439 llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;
440
441public:
442 /// Return PostAllocaInsertPt. If it is not yet created, then insert it
443 /// immediately after AllocaInsertPt.
444 llvm::Instruction *getPostAllocaInsertPoint() {
445 if (!PostAllocaInsertPt) {
446 assert(AllocaInsertPt &&
447 "Expected static alloca insertion point at function prologue");
448 assert(AllocaInsertPt->getParent()->isEntryBlock() &&
449 "EBB should be entry block of the current code gen function");
    // Lazily materialize the marker by cloning the existing alloca
    // insertion-point instruction: the clone is valid wherever the
    // original is, and placing it immediately after AllocaInsertPt
    // guarantees everything inserted before it is dominated by the
    // static allocas.
450 PostAllocaInsertPt = AllocaInsertPt->clone();
451 PostAllocaInsertPt->setName("postallocapt");
452 PostAllocaInsertPt->insertAfter(AllocaInsertPt);
453 }
454
455 return PostAllocaInsertPt;
456 }
457
458 /// API for captured statement code generation.
460 public:
462 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
465 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
466
468 S.getCapturedRecordDecl()->field_begin();
469 for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
470 E = S.capture_end();
471 I != E; ++I, ++Field) {
472 if (I->capturesThis())
473 CXXThisFieldDecl = *Field;
474 else if (I->capturesVariable())
475 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
476 else if (I->capturesVariableByCopy())
477 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
478 }
479 }
480
481 virtual ~CGCapturedStmtInfo();
482
483 CapturedRegionKind getKind() const { return Kind; }
484
485 virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
486 // Retrieve the value of the context parameter.
487 virtual llvm::Value *getContextValue() const { return ThisValue; }
488
489 /// Lookup the captured field decl for a variable.
490 virtual const FieldDecl *lookup(const VarDecl *VD) const {
    // Keyed on the canonical decl — the map is populated with
    // getCanonicalDecl() in the constructor, so any redeclaration of the
    // variable resolves to the same captured field.
491 return CaptureFields.lookup(VD->getCanonicalDecl());
492 }
493
494 bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
495 virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
496
497 static bool classof(const CGCapturedStmtInfo *) {
498 return true;
499 }
500
501 /// Emit the captured statement body.
502 virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
504 CGF.EmitStmt(S);
505 }
506
507 /// Get the name of the capture helper.
508 virtual StringRef getHelperName() const { return "__captured_stmt"; }
509
510 /// Get the CaptureFields
511 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
    // NOTE(review): returns the map by value, copying it on every call.
    // Confirm whether callers rely on a snapshot before considering a
    // const-reference return (which would change the interface).
512 return CaptureFields;
513 }
514
515 private:
516 /// The kind of captured statement being generated.
518
519 /// Keep the map between VarDecl and FieldDecl.
520 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
521
522 /// The base address of the captured record, passed in as the first
523 /// argument of the parallel region function.
524 llvm::Value *ThisValue;
525
526 /// Captured 'this' type.
527 FieldDecl *CXXThisFieldDecl;
528 };
530
531 /// RAII for correct setting/restoring of CapturedStmtInfo.
533 private:
534 CodeGenFunction &CGF;
535 CGCapturedStmtInfo *PrevCapturedStmtInfo;
536 public:
538 CGCapturedStmtInfo *NewCapturedStmtInfo)
539 : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
540 CGF.CapturedStmtInfo = NewCapturedStmtInfo;
541 }
542 ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
543 };
544
545 /// An abstract representation of regular/ObjC call/message targets.
547 /// The function declaration of the callee.
548 const Decl *CalleeDecl;
549
550 public:
551 AbstractCallee() : CalleeDecl(nullptr) {}
552 AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
553 AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
554 bool hasFunctionDecl() const {
    // True only when the callee is a genuine FunctionDecl; a null callee
    // or an Objective-C method yields false.
555 return isa_and_nonnull<FunctionDecl>(CalleeDecl);
556 }
557 const Decl *getDecl() const { return CalleeDecl; }
558 unsigned getNumParams() const {
559 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
560 return FD->getNumParams();
561 return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
562 }
563 const ParmVarDecl *getParamDecl(unsigned I) const {
564 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
565 return FD->getParamDecl(I);
566 return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
567 }
568 };
569
570 /// Sanitizers enabled for this function.
572
573 /// True if CodeGen currently emits code implementing sanitizer checks.
574 bool IsSanitizerScope = false;
575
576 /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
578 CodeGenFunction *CGF;
579 public:
582 };
583
584 /// In C++, whether we are code generating a thunk. This controls whether we
585 /// should emit cleanups.
586 bool CurFuncIsThunk = false;
587
588 /// In ARC, whether we should autorelease the return value.
589 bool AutoreleaseResult = false;
590
591 /// Whether we processed a Microsoft-style asm block during CodeGen. These can
592 /// potentially set the return value.
593 bool SawAsmBlock = false;
594
596
597 /// True if the current function is an outlined SEH helper. This can be a
598 /// finally block or filter expression.
600
601 /// True if CodeGen currently emits code inside presereved access index
602 /// region.
604
605 /// True if the current statement has nomerge attribute.
607
608 /// True if the current statement has noinline attribute.
610
611 /// True if the current statement has always_inline attribute.
613
614 // The CallExpr within the current statement that the musttail attribute
615 // applies to. nullptr if there is no 'musttail' on the current statement.
616 const CallExpr *MustTailCall = nullptr;
617
618 /// Returns true if a function must make progress, which means the
619 /// mustprogress attribute can be added.
621 if (CGM.getCodeGenOpts().getFiniteLoops() ==
623 return false;
624
625 // C++11 and later guarantees that a thread eventually will do one of the
626 // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
627 // - terminate,
628 // - make a call to a library I/O function,
629 // - perform an access through a volatile glvalue, or
630 // - perform a synchronization operation or an atomic operation.
631 //
632 // Hence each function is 'mustprogress' in C++11 or later.
633 return getLangOpts().CPlusPlus11;
634 }
635
636 /// Returns true if a loop must make progress, which means the mustprogress
637 /// attribute can be added. \p HasConstantCond indicates whether the branch
638 /// condition is a known constant.
639 bool checkIfLoopMustProgress(bool HasConstantCond) {
640 if (CGM.getCodeGenOpts().getFiniteLoops() ==
642 return true;
643 if (CGM.getCodeGenOpts().getFiniteLoops() ==
645 return false;
646
647 // If the containing function must make progress, loops also must make
648 // progress (as in C++11 and later).
650 return true;
651
652 // Now apply rules for plain C (see 6.8.5.6 in C11).
653 // Loops with constant conditions do not have to make progress in any C
654 // version.
655 if (HasConstantCond)
656 return false;
657
658 // Loops with non-constant conditions must make progress in C11 and later.
659 return getLangOpts().C11;
660 }
661
663 llvm::Value *BlockPointer = nullptr;
664
665 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
667
668 /// A mapping from NRVO variables to the flags used to indicate
669 /// when the NRVO has been applied to this variable.
670 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
671
674
675 // A stack of cleanups which were added to EHStack but have to be deactivated
676 // later before being popped or emitted. These are usually deactivated on
677 // exiting a `CleanupDeactivationScope` scope. For instance, after a
678 // full-expr.
679 //
680 // These are specially useful for correctly emitting cleanups while
681 // encountering branches out of expression (through stmt-expr or coroutine
682 // suspensions).
685 llvm::Instruction *DominatingIP;
686 };
688
689 // Enters a new scope for capturing cleanups which are deferred to be
690 // deactivated, all of which will be deactivated once the scope is exited.
699
701 assert(!Deactivated && "Deactivating already deactivated scope");
703 for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
704 CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
705 Stack[I - 1].DominatingIP);
706 Stack[I - 1].DominatingIP->eraseFromParent();
707 }
708 Stack.resize(OldDeactivateCleanupStackSize);
709 Deactivated = true;
710 }
711
713 if (Deactivated)
714 return;
716 }
717 };
718
720
721 llvm::Instruction *CurrentFuncletPad = nullptr;
722
724 bool isRedundantBeforeReturn() override { return true; }
725
726 llvm::Value *Addr;
727 llvm::Value *Size;
728
729 public:
730 CallLifetimeEnd(RawAddress addr, llvm::Value *size)
731 : Addr(addr.getPointer()), Size(size) {}
732
733 void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Ends the lifetime marker for the tracked address/size captured at
    // construction; `flags` is unused for this cleanup.
734 CGF.EmitLifetimeEnd(Size, Addr);
735 }
736 };
737
738 /// Header for data within LifetimeExtendedCleanupStack.
740 /// The size of the following cleanup object.
741 unsigned Size;
742 /// The kind of cleanup to push.
743 LLVM_PREFERRED_TYPE(CleanupKind)
745 /// Whether this is a conditional cleanup.
746 LLVM_PREFERRED_TYPE(bool)
747 unsigned IsConditional : 1;
748
749 size_t getSize() const { return Size; }
750 CleanupKind getKind() const { return (CleanupKind)Kind; }
751 bool isConditional() const { return IsConditional; }
752 };
753
754 /// i32s containing the indexes of the cleanup destinations.
756
758
759 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
760 llvm::BasicBlock *EHResumeBlock = nullptr;
761
762 /// The exception slot. All landing pads write the current exception pointer
763 /// into this alloca.
764 llvm::Value *ExceptionSlot = nullptr;
765
766 /// The selector slot. Under the MandatoryCleanup model, all landing pads
767 /// write the current selector value into this alloca.
768 llvm::AllocaInst *EHSelectorSlot = nullptr;
769
770 /// A stack of exception code slots. Entering an __except block pushes a slot
771 /// on the stack and leaving pops one. The __exception_code() intrinsic loads
772 /// a value from the top of the stack.
774
775 /// Value returned by __exception_info intrinsic.
776 llvm::Value *SEHInfo = nullptr;
777
778 /// Emits a landing pad for the current EH stack.
779 llvm::BasicBlock *EmitLandingPad();
780
781 llvm::BasicBlock *getInvokeDestImpl();
782
783 /// Parent loop-based directive for scan directive.
785 llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
786 llvm::BasicBlock *OMPAfterScanBlock = nullptr;
787 llvm::BasicBlock *OMPScanExitBlock = nullptr;
788 llvm::BasicBlock *OMPScanDispatch = nullptr;
789 bool OMPFirstScanLoop = false;
790
791 /// Manages parent directive for scan directives.
793 CodeGenFunction &CGF;
794 const OMPExecutableDirective *ParentLoopDirectiveForScan;
795
796 public:
798 CodeGenFunction &CGF,
799 const OMPExecutableDirective &ParentLoopDirectiveForScan)
800 : CGF(CGF),
801 ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
802 CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
803 }
805 CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
806 }
807 };
808
809 template <class T>
811 return DominatingValue<T>::save(*this, value);
812 }
813
815 public:
816 CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
817 CGFPOptionsRAII(CodeGenFunction &CGF, const Expr *E);
819
820 private:
821 void ConstructorHelper(FPOptions FPFeatures);
822 CodeGenFunction &CGF;
823 FPOptions OldFPFeatures;
824 llvm::fp::ExceptionBehavior OldExcept;
825 llvm::RoundingMode OldRounding;
826 std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
827 };
829
830public:
831 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
832 /// rethrows.
834
835 /// A class controlling the emission of a finally block.
837 /// Where the catchall's edge through the cleanup should go.
838 JumpDest RethrowDest;
839
840 /// A function to call to enter the catch.
841 llvm::FunctionCallee BeginCatchFn;
842
843 /// An i1 variable indicating whether or not the @finally is
844 /// running for an exception.
845 llvm::AllocaInst *ForEHVar = nullptr;
846
847 /// An i8* variable into which the exception pointer to rethrow
848 /// has been saved.
849 llvm::AllocaInst *SavedExnVar = nullptr;
850
851 public:
852 void enter(CodeGenFunction &CGF, const Stmt *Finally,
853 llvm::FunctionCallee beginCatchFn,
854 llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
855 void exit(CodeGenFunction &CGF);
856 };
857
858 /// Returns true inside SEH __try blocks.
859 bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
860
861 /// Returns true while emitting a cleanuppad.
862 bool isCleanupPadScope() const {
    // A funclet pad may also be a catchpad; only cleanuppads count here.
863 return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
864 }
865
866 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
867 /// current full-expression. Safe against the possibility that
868 /// we're currently inside a conditionally-evaluated expression.
869 template <class T, class... As>
870 void pushFullExprCleanup(CleanupKind kind, As... A) {
871 // If we're not in a conditional branch, or if none of the
872 // arguments requires saving, then use the unconditional cleanup.
874 return EHStack.pushCleanup<T>(kind, A...);
875
876 // Stash values in a tuple so we can guarantee the order of saves.
877 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
878 SavedTuple Saved{saveValueInCond(A)...};
879
880 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
881 EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
883 }
884
885 /// Queue a cleanup to be pushed after finishing the current full-expression,
886 /// potentially with an active flag.
887 template <class T, class... As>
890 return pushCleanupAfterFullExprWithActiveFlag<T>(
891 Kind, RawAddress::invalid(), A...);
892
893 RawAddress ActiveFlag = createCleanupActiveFlag();
894 assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
895 "cleanup active flag should never need saving");
896
897 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
898 SavedTuple Saved{saveValueInCond(A)...};
899
900 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
901 pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag, Saved);
902 }
903
904 template <class T, class... As>
906 RawAddress ActiveFlag, As... A) {
907 LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
908 ActiveFlag.isValid()};
909
910 size_t OldSize = LifetimeExtendedCleanupStack.size();
912 LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
913 (Header.IsConditional ? sizeof(ActiveFlag) : 0));
914
915 static_assert(sizeof(Header) % alignof(T) == 0,
916 "Cleanup will be allocated on misaligned address");
917 char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
918 new (Buffer) LifetimeExtendedCleanupHeader(Header);
919 new (Buffer + sizeof(Header)) T(A...);
920 if (Header.IsConditional)
921 new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
922 }
923
924 // Push a cleanup onto EHStack and deactivate it later. It is usually
925 // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
926 // full expression).
927 template <class T, class... As>
929 // Placeholder dominating IP for this cleanup.
930 llvm::Instruction *DominatingIP =
931 Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
932 EHStack.pushCleanup<T>(Kind, A...);
934 {EHStack.stable_begin(), DominatingIP});
935 }
936
937 /// Set up the last cleanup that was pushed as a conditional
938 /// full-expression cleanup.
941 }
942
945
946 /// PushDestructorCleanup - Push a cleanup to call the
947 /// complete-object destructor of an object of the given type at the
948 /// given address. Does nothing if T is not a C++ class type with a
949 /// non-trivial destructor.
951
952 /// PushDestructorCleanup - Push a cleanup to call the
953 /// complete-object variant of the given destructor on the object at
954 /// the given address.
956 Address Addr);
957
958 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
959 /// process all branch fixups.
960 void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
961
962 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
963 /// The block cannot be reactivated. Pops it if it's the top of the
964 /// stack.
965 ///
966 /// \param DominatingIP - An instruction which is known to
967 /// dominate the current IP (if set) and which lies along
968 /// all paths of execution between the current IP and the
969 /// the point at which the cleanup comes into scope.
971 llvm::Instruction *DominatingIP);
972
973 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
974 /// Cannot be used to resurrect a deactivated cleanup.
975 ///
976 /// \param DominatingIP - An instruction which is known to
977 /// dominate the current IP (if set) and which lies along
978 /// all paths of execution between the current IP and the
979 /// the point at which the cleanup comes into scope.
981 llvm::Instruction *DominatingIP);
982
983 /// Enters a new scope for capturing cleanups, all of which
984 /// will be executed once the scope is exited.
986 EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
987 size_t LifetimeExtendedCleanupStackSize;
988 CleanupDeactivationScope DeactivateCleanups;
989 bool OldDidCallStackSave;
990 protected:
992 private:
993
994 RunCleanupsScope(const RunCleanupsScope &) = delete;
995 void operator=(const RunCleanupsScope &) = delete;
996
997 protected:
999
1000 public:
1001 /// Enter a new cleanup scope.
1003 : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
1004 CleanupStackDepth = CGF.EHStack.stable_begin();
1005 LifetimeExtendedCleanupStackSize =
1007 OldDidCallStackSave = CGF.DidCallStackSave;
1008 CGF.DidCallStackSave = false;
1009 OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
1010 CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
1011 }
1012
1013 /// Exit this cleanup scope, emitting any accumulated cleanups.
1015 if (PerformCleanup)
1016 ForceCleanup();
1017 }
1018
1019 /// Determine whether this scope requires any cleanups.
1020 bool requiresCleanups() const {
     // Anything pushed onto EHStack since this scope captured its depth
     // implies cleanups must be emitted on exit.
1021 return CGF.EHStack.stable_begin() != CleanupStackDepth;
1022 }
1023
    /// Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    /// \param ValuesToReload - A list of values that need to be available at
    /// the insertion point after cleanup emission. If cleanup emission created
    /// a shared cleanup block, these value pointers will be rewritten.
    /// Otherwise, they will not be modified.
    void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
      assert(PerformCleanup && "Already forced cleanup");
      // Restore the saved llvm.stacksave state before popping, so cleanup
      // emission does not observe this scope's stacksave flag.
      CGF.DidCallStackSave = OldDidCallStackSave;
      // First deactivate cleanups registered for deferred deactivation in
      // this scope, then pop everything pushed since the scope was entered.
      DeactivateCleanups.ForceDeactivate();
      CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
                           ValuesToReload);
      // Mark the scope dead and restore the enclosing scope's depth.
      PerformCleanup = false;
      CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
    }
1039 };
1040
1041 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1044
1046 SourceRange Range;
1048 LexicalScope *ParentScope;
1049
1050 LexicalScope(const LexicalScope &) = delete;
1051 void operator=(const LexicalScope &) = delete;
1052
1053 public:
1054 /// Enter a new cleanup scope.
1056 : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
1057 CGF.CurLexicalScope = this;
1058 if (CGDebugInfo *DI = CGF.getDebugInfo())
1059 DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
1060 }
1061
    /// Record a label declared in this lexical scope so that it can be
    /// rescoped when the scope is exited (see rescopeLabels()).
    void addLabel(const LabelDecl *label) {
      assert(PerformCleanup && "adding label to dead scope?");
      Labels.push_back(label);
    }
1066
1067 /// Exit this cleanup scope, emitting any accumulated
1068 /// cleanups.
1070 if (CGDebugInfo *DI = CGF.getDebugInfo())
1071 DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
1072
1073 // If we should perform a cleanup, force them now. Note that
1074 // this ends the cleanup scope before rescoping any labels.
1075 if (PerformCleanup) {
1076 ApplyDebugLocation DL(CGF, Range.getEnd());
1077 ForceCleanup();
1078 }
1079 }
1080
1081 /// Force the emission of cleanups now, instead of waiting
1082 /// until this object is destroyed.
1084 CGF.CurLexicalScope = ParentScope;
1086
1087 if (!Labels.empty())
1088 rescopeLabels();
1089 }
1090
    /// Whether any labels have been registered in this scope via addLabel().
    bool hasLabels() const {
      return !Labels.empty();
    }
1094
1095 void rescopeLabels();
1096 };
1097
1098 typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
1099
1100 /// The class used to assign some variables some temporarily addresses.
1102 DeclMapTy SavedLocals;
1103 DeclMapTy SavedTempAddresses;
1104 OMPMapVars(const OMPMapVars &) = delete;
1105 void operator=(const OMPMapVars &) = delete;
1106
1107 public:
1108 explicit OMPMapVars() = default;
1110 assert(SavedLocals.empty() && "Did not restored original addresses.");
1111 };
1112
1113 /// Sets the address of the variable \p LocalVD to be \p TempAddr in
1114 /// function \p CGF.
1115 /// \return true if at least one variable was set already, false otherwise.
1116 bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
1117 Address TempAddr) {
1118 LocalVD = LocalVD->getCanonicalDecl();
1119 // Only save it once.
1120 if (SavedLocals.count(LocalVD)) return false;
1121
1122 // Copy the existing local entry to SavedLocals.
1123 auto it = CGF.LocalDeclMap.find(LocalVD);
1124 if (it != CGF.LocalDeclMap.end())
1125 SavedLocals.try_emplace(LocalVD, it->second);
1126 else
1127 SavedLocals.try_emplace(LocalVD, Address::invalid());
1128
1129 // Generate the private entry.
1130 QualType VarTy = LocalVD->getType();
1131 if (VarTy->isReferenceType()) {
1132 Address Temp = CGF.CreateMemTemp(VarTy);
1133 CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
1134 TempAddr = Temp;
1135 }
1136 SavedTempAddresses.try_emplace(LocalVD, TempAddr);
1137
1138 return true;
1139 }
1140
1141 /// Applies new addresses to the list of the variables.
1142 /// \return true if at least one variable is using new address, false
1143 /// otherwise.
1145 copyInto(SavedTempAddresses, CGF.LocalDeclMap);
1146 SavedTempAddresses.clear();
1147 return !SavedLocals.empty();
1148 }
1149
1150 /// Restores original addresses of the variables.
1152 if (!SavedLocals.empty()) {
1153 copyInto(SavedLocals, CGF.LocalDeclMap);
1154 SavedLocals.clear();
1155 }
1156 }
1157
1158 private:
1159 /// Copy all the entries in the source map over the corresponding
1160 /// entries in the destination, which must exist.
1161 static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
1162 for (auto &Pair : Src) {
1163 if (!Pair.second.isValid()) {
1164 Dest.erase(Pair.first);
1165 continue;
1166 }
1167
1168 auto I = Dest.find(Pair.first);
1169 if (I != Dest.end())
1170 I->second = Pair.second;
1171 else
1172 Dest.insert(Pair);
1173 }
1174 }
1175 };
1176
1177 /// The scope used to remap some variables as private in the OpenMP loop body
1178 /// (or other captured region emitted without outlining), and to restore old
1179 /// vars back on exit.
1181 OMPMapVars MappedVars;
1182 OMPPrivateScope(const OMPPrivateScope &) = delete;
1183 void operator=(const OMPPrivateScope &) = delete;
1184
1185 public:
1186 /// Enter a new OpenMP private scope.
1188
    /// Registers \p LocalVD variable as a private with \p Addr as the address
    /// of the corresponding private variable.
    /// \return true if the variable is registered as private, false if it has
    /// been privatized already.
    bool addPrivate(const VarDecl *LocalVD, Address Addr) {
      assert(PerformCleanup && "adding private to dead scope");
      return MappedVars.setVarAddr(CGF, LocalVD, Addr);
    }
1198
    /// Privatizes local variables previously registered as private.
    /// Registration is separate from the actual privatization to allow
    /// initializers to use values of the original variables, not the private
    /// copies. This is important, for example, if the private variable is a
    /// class variable initialized by a constructor that references other
    /// private variables. But at initialization original variables must be
    /// used, not private copies.
    /// \return true if at least one variable was privatized, false otherwise.
    bool Privatize() { return MappedVars.apply(CGF); }
1208
1211 restoreMap();
1212 }
1213
1214 /// Exit scope - all the mapped variables are restored.
1216 if (PerformCleanup)
1217 ForceCleanup();
1218 }
1219
1220 /// Checks if the global variable is captured in current function.
1221 bool isGlobalVarCaptured(const VarDecl *VD) const {
1222 VD = VD->getCanonicalDecl();
1223 return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
1224 }
1225
    /// Restore all mapped variables w/o clean up. This is useful when we want
    /// to reference the original variables but don't want the clean up because
    /// that could emit lifetime end too early, causing backend issue #56913.
    void restoreMap() { MappedVars.restore(CGF); }
1230 };
1231
1232 /// Save/restore original map of previously emitted local vars in case when we
1233 /// need to duplicate emission of the same code several times in the same
1234 /// function for OpenMP code.
1236 CodeGenFunction &CGF;
1237 DeclMapTy SavedMap;
1238
1239 public:
1241 : CGF(CGF), SavedMap(CGF.LocalDeclMap) {}
1242 ~OMPLocalDeclMapRAII() { SavedMap.swap(CGF.LocalDeclMap); }
1243 };
1244
1245 /// Takes the old cleanup stack size and emits the cleanup blocks
1246 /// that have been added.
1247 void
1249 std::initializer_list<llvm::Value **> ValuesToReload = {});
1250
1251 /// Takes the old cleanup stack size and emits the cleanup blocks
1252 /// that have been added, then adds all lifetime-extended cleanups from
1253 /// the given position to the stack.
1254 void
1256 size_t OldLifetimeExtendedStackSize,
1257 std::initializer_list<llvm::Value **> ValuesToReload = {});
1258
1259 void ResolveBranchFixups(llvm::BasicBlock *Target);
1260
1261 /// The given basic block lies in the current EH scope, but may be a
1262 /// target of a potentially scope-crossing jump; get a stable handle
1263 /// to which we can perform this jump later.
1265 return JumpDest(Target,
1268 }
1269
1270 /// The given basic block lies in the current EH scope, but may be a
1271 /// target of a potentially scope-crossing jump; get a stable handle
1272 /// to which we can perform this jump later.
1273 JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
1275 }
1276
1277 /// EmitBranchThroughCleanup - Emit a branch from the current insert
1278 /// block through the normal cleanup handling code (if any) and then
1279 /// on to \arg Dest.
1281
1282 /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
1283 /// specified destination obviously has no cleanups to run. 'false' is always
1284 /// a conservatively correct answer for this method.
1286
1287 /// popCatchScope - Pops the catch scope at the top of the EHScope
1288 /// stack, emitting any required code (other than the catch handlers
1289 /// themselves).
1291
1292 llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
1294 llvm::BasicBlock *
1296
1297 /// An object to manage conditionally-evaluated expressions.
1299 llvm::BasicBlock *StartBB;
1300
1301 public:
1303 : StartBB(CGF.Builder.GetInsertBlock()) {}
1304
1306 assert(CGF.OutermostConditional != this);
1307 if (!CGF.OutermostConditional)
1308 CGF.OutermostConditional = this;
1309 }
1310
1312 assert(CGF.OutermostConditional != nullptr);
1313 if (CGF.OutermostConditional == this)
1314 CGF.OutermostConditional = nullptr;
1315 }
1316
    /// Returns the block that was the insertion point when this conditional
    /// evaluation was constructed; code placed there executes prior to each
    /// evaluation of the conditionally-emitted code.
    llvm::BasicBlock *getStartingBlock() const {
      return StartBB;
    }
1322 };
1323
1324 /// isInConditionalBranch - Return true if we're currently emitting
1325 /// one branch or the other of a conditional expression.
1326 bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1327
  /// Store \p value into \p addr at the start of the outermost active
  /// conditional, so the store executes prior to each evaluation of the
  /// conditional code.  Requires that a conditional branch is currently
  /// being emitted (see isInConditionalBranch()).
  void setBeforeOutermostConditional(llvm::Value *value, Address addr,
                                     CodeGenFunction &CGF) {
    assert(isInConditionalBranch());
    llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
    // Insert the store before the starting block's last instruction, then
    // copy the destination's alignment onto it.
    auto store =
        new llvm::StoreInst(value, addr.emitRawPointer(CGF), &block->back());
    store->setAlignment(addr.getAlignment().getAsAlign());
  }
1336
1337 /// An RAII object to record that we're evaluating a statement
1338 /// expression.
1340 CodeGenFunction &CGF;
1341
1342 /// We have to save the outermost conditional: cleanups in a
1343 /// statement expression aren't conditional just because the
1344 /// StmtExpr is.
1345 ConditionalEvaluation *SavedOutermostConditional;
1346
1347 public:
1349 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1350 CGF.OutermostConditional = nullptr;
1351 }
1352
1354 CGF.OutermostConditional = SavedOutermostConditional;
1355 CGF.EnsureInsertPoint();
1356 }
1357 };
1358
1359 /// An object which temporarily prevents a value from being
1360 /// destroyed by aggressive peephole optimizations that assume that
1361 /// all uses of a value have been realized in the IR.
1363 llvm::Instruction *Inst = nullptr;
1364 friend class CodeGenFunction;
1365
1366 public:
1368 };
1369
1370 /// A non-RAII class containing all the information about a bound
1371 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1372 /// this which makes individual mappings very simple; using this
1373 /// class directly is useful when you have a variable number of
1374 /// opaque values or don't want the RAII functionality for some
1375 /// reason.
1377 const OpaqueValueExpr *OpaqueValue;
1378 bool BoundLValue;
1380
1382 bool boundLValue)
1383 : OpaqueValue(ov), BoundLValue(boundLValue) {}
1384 public:
1385 OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1386
1387 static bool shouldBindAsLValue(const Expr *expr) {
1388 // gl-values should be bound as l-values for obvious reasons.
1389 // Records should be bound as l-values because IR generation
1390 // always keeps them in memory. Expressions of function type
1391 // act exactly like l-values but are formally required to be
1392 // r-values in C.
1393 return expr->isGLValue() ||
1394 expr->getType()->isFunctionType() ||
1395 hasAggregateEvaluationKind(expr->getType());
1396 }
1397
1399 const OpaqueValueExpr *ov,
1400 const Expr *e) {
1401 if (shouldBindAsLValue(ov))
1402 return bind(CGF, ov, CGF.EmitLValue(e));
1403 return bind(CGF, ov, CGF.EmitAnyExpr(e));
1404 }
1405
1407 const OpaqueValueExpr *ov,
1408 const LValue &lv) {
1409 assert(shouldBindAsLValue(ov));
1410 CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1411 return OpaqueValueMappingData(ov, true);
1412 }
1413
1415 const OpaqueValueExpr *ov,
1416 const RValue &rv) {
1417 assert(!shouldBindAsLValue(ov));
1418 CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1419
1420 OpaqueValueMappingData data(ov, false);
1421
1422 // Work around an extremely aggressive peephole optimization in
1423 // EmitScalarConversion which assumes that all other uses of a
1424 // value are extant.
1425 data.Protection = CGF.protectFromPeepholes(rv);
1426
1427 return data;
1428 }
1429
1430 bool isValid() const { return OpaqueValue != nullptr; }
1431 void clear() { OpaqueValue = nullptr; }
1432
1434 assert(OpaqueValue && "no data to unbind!");
1435
1436 if (BoundLValue) {
1437 CGF.OpaqueLValues.erase(OpaqueValue);
1438 } else {
1439 CGF.OpaqueRValues.erase(OpaqueValue);
1440 CGF.unprotectFromPeepholes(Protection);
1441 }
1442 }
1443 };
1444
1445 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1447 CodeGenFunction &CGF;
1449
1450 public:
1451 static bool shouldBindAsLValue(const Expr *expr) {
1453 }
1454
1455 /// Build the opaque value mapping for the given conditional
1456 /// operator if it's the GNU ?: extension. This is a common
1457 /// enough pattern that the convenience operator is really
1458 /// helpful.
1459 ///
1461 const AbstractConditionalOperator *op) : CGF(CGF) {
1462 if (isa<ConditionalOperator>(op))
1463 // Leave Data empty.
1464 return;
1465
1466 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1468 e->getCommon());
1469 }
1470
1471 /// Build the opaque value mapping for an OpaqueValueExpr whose source
1472 /// expression is set to the expression the OVE represents.
1474 : CGF(CGF) {
1475 if (OV) {
1476 assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1477 "for OVE with no source expression");
1479 }
1480 }
1481
1483 const OpaqueValueExpr *opaqueValue,
1484 LValue lvalue)
1485 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1486 }
1487
1489 const OpaqueValueExpr *opaqueValue,
1490 RValue rvalue)
1491 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1492 }
1493
    /// Explicitly end the opaque-value mapping before this RAII object is
    /// destroyed; clearing the data makes the destructor a no-op.
    void pop() {
      Data.unbind(CGF);
      Data.clear();
    }
1498
1500 if (Data.isValid()) Data.unbind(CGF);
1501 }
1502 };
1503
1504private:
1505 CGDebugInfo *DebugInfo;
1506 /// Used to create unique names for artificial VLA size debug info variables.
1507 unsigned VLAExprCounter = 0;
1508 bool DisableDebugInfo = false;
1509
1510 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1511 /// calling llvm.stacksave for multiple VLAs in the same scope.
1512 bool DidCallStackSave = false;
1513
1514 /// IndirectBranch - The first time an indirect goto is seen we create a block
1515 /// with an indirect branch. Every time we see the address of a label taken,
1516 /// we add the label to the indirect goto. Every subsequent indirect goto is
1517 /// codegen'd as a jump to the IndirectBranch's basic block.
1518 llvm::IndirectBrInst *IndirectBranch = nullptr;
1519
1520 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1521 /// decls.
1522 DeclMapTy LocalDeclMap;
1523
1524 // Keep track of the cleanups for callee-destructed parameters pushed to the
1525 // cleanup stack so that they can be deactivated later.
1526 llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1527 CalleeDestructedParamCleanups;
1528
1529 /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1530 /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1531 /// parameter.
1532 llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1533 SizeArguments;
1534
1535 /// Track escaped local variables with auto storage. Used during SEH
1536 /// outlining to produce a call to llvm.localescape.
1537 llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1538
1539 /// LabelMap - This keeps track of the LLVM basic block for each C label.
1540 llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1541
  // BreakContinue - This keeps track of where break and continue
  // statements should jump to.
  struct BreakContinue {
    BreakContinue(JumpDest Break, JumpDest Continue)
        : BreakBlock(Break), ContinueBlock(Continue) {}

    // Destination of a 'break' in the current loop or switch.
    JumpDest BreakBlock;
    // Destination of a 'continue' in the current loop.
    JumpDest ContinueBlock;
  };
1551 SmallVector<BreakContinue, 8> BreakContinueStack;
1552
  /// Handles cancellation exit points in OpenMP-related constructs.
  class OpenMPCancelExitStack {
    /// Tracks cancellation exit point and join point for cancel-related exit
    /// and normal exit.
    struct CancelExit {
      CancelExit() = default;
      CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
                 JumpDest ContBlock)
          : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
      /// Directive kind this entry was pushed for; OMPD_unknown for the
      /// default bottom-of-stack entry.
      OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
      /// true if the exit block has been emitted already by the special
      /// emitExit() call, false if the default codegen is used.
      bool HasBeenEmitted = false;
      JumpDest ExitBlock;
      JumpDest ContBlock;
    };

    SmallVector<CancelExit, 8> Stack;

  public:
    /// Start with one default entry so Stack.back() is always valid, even
    /// outside any cancel-supporting construct.
    OpenMPCancelExitStack() : Stack(1) {}
    ~OpenMPCancelExitStack() = default;
    /// Fetches the exit block for the current OpenMP construct.
    JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
    /// Emits exit block with special codegen procedure specific for the related
    /// OpenMP construct + emits code for normal construct cleanup.
    void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
                  const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
      if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
        assert(CGF.getOMPCancelDestination(Kind).isValid());
        assert(CGF.HaveInsertPoint());
        assert(!Stack.back().HasBeenEmitted);
        // Emit the cancel-exit copy of the cleanup out of line, without
        // disturbing the current insertion point, and branch it to the join
        // block.
        auto IP = CGF.Builder.saveAndClearIP();
        CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
        CodeGen(CGF);
        CGF.EmitBranch(Stack.back().ContBlock.getBlock());
        CGF.Builder.restoreIP(IP);
        Stack.back().HasBeenEmitted = true;
      }
      // Also emit the cleanup inline on the normal (non-cancelled) path.
      CodeGen(CGF);
    }
    /// Enter the cancel supporting \a Kind construct.
    /// \param Kind OpenMP directive that supports cancel constructs.
    /// \param HasCancel true, if the construct has inner cancel directive,
    /// false otherwise.
    void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
      // Only allocate real exit/continuation destinations when cancellation
      // is actually possible; otherwise push invalid JumpDests.
      Stack.push_back({Kind,
                       HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
                                 : JumpDest(),
                       HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
                                 : JumpDest()});
    }
    /// Emits default exit point for the cancel construct (if the special one
    /// has not been used) + join point for cancel/normal exits.
    void exit(CodeGenFunction &CGF) {
      if (getExitBlock().isValid()) {
        assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
        bool HaveIP = CGF.HaveInsertPoint();
        if (!Stack.back().HasBeenEmitted) {
          // Default codegen: route the normal path to the join block, then
          // emit the exit block as a plain branch through cleanups to the
          // same join.
          if (HaveIP)
            CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
          CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
          CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
        }
        // Join point for cancel-related and normal exits.
        CGF.EmitBlock(Stack.back().ContBlock.getBlock());
        if (!HaveIP) {
          // There was no insertion point on entry, so the join block is not
          // really reachable; terminate it and clear the builder again.
          CGF.Builder.CreateUnreachable();
          CGF.Builder.ClearInsertionPoint();
        }
      }
      Stack.pop_back();
    }
  };
1626 OpenMPCancelExitStack OMPCancelStack;
1627
1628 /// Lower the Likelihood knowledge about the \p Cond via llvm.expect intrin.
1629 llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
1630 Stmt::Likelihood LH);
1631
1632 CodeGenPGO PGO;
1633
1634 /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
1635 Address MCDCCondBitmapAddr = Address::invalid();
1636
1637 /// Calculate branch weights appropriate for PGO data
1638 llvm::MDNode *createProfileWeights(uint64_t TrueCount,
1639 uint64_t FalseCount) const;
1640 llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
1641 llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1642 uint64_t LoopCount) const;
1643
1644public:
1645 /// Increment the profiler's counter for the given statement by \p StepV.
1646 /// If \p StepV is null, the default increment is 1.
1647 void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1649 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
1650 !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile))
1651 PGO.emitCounterSetOrIncrement(Builder, S, StepV);
1652 PGO.setCurrentStmt(S);
1653 }
1654
1657 CGM.getCodeGenOpts().MCDCCoverage &&
1658 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
1659 }
1660
1661 /// Allocate a temp value on the stack that MCDC can use to track condition
1662 /// results.
1664 if (isMCDCCoverageEnabled()) {
1665 PGO.emitMCDCParameters(Builder);
1666 MCDCCondBitmapAddr =
1667 CreateIRTemp(getContext().UnsignedIntTy, "mcdc.addr");
1668 }
1669 }
1670
1671 bool isBinaryLogicalOp(const Expr *E) const {
1672 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
1673 return (BOp && BOp->isLogicalOp());
1674 }
1675
1676 /// Zero-init the MCDC temp value.
1679 PGO.emitMCDCCondBitmapReset(Builder, E, MCDCCondBitmapAddr);
1680 PGO.setCurrentStmt(E);
1681 }
1682 }
1683
1684 /// Increment the profiler's counter for the given expression by \p StepV.
1685 /// If \p StepV is null, the default increment is 1.
1688 PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr, *this);
1689 PGO.setCurrentStmt(E);
1690 }
1691 }
1692
  /// Update the MCDC temp value with the condition's evaluated result.
  /// No-op when MC/DC coverage instrumentation is not enabled.
  void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val) {
    if (isMCDCCoverageEnabled()) {
      PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val, *this);
      PGO.setCurrentStmt(E);
    }
  }
1700
  /// Get the profiler's count for the given statement.
  /// Returns 0 when no count has been recorded for \p S.
  uint64_t getProfileCount(const Stmt *S) {
    return PGO.getStmtCount(S).value_or(0);
  }
1705
1706 /// Set the profiler's current count.
1707 void setCurrentProfileCount(uint64_t Count) {
1708 PGO.setCurrentRegionCount(Count);
1709 }
1710
1711 /// Get the profiler's current count. This is generally the count for the most
1712 /// recently incremented counter.
1714 return PGO.getCurrentRegionCount();
1715 }
1716
1717private:
1718
1719 /// SwitchInsn - This is nearest current switch instruction. It is null if
1720 /// current context is not in a switch.
1721 llvm::SwitchInst *SwitchInsn = nullptr;
1722 /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1723 SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1724
1725 /// The likelihood attributes of the SwitchCase.
1726 SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;
1727
1728 /// CaseRangeBlock - This block holds if condition check for last case
1729 /// statement range in current switch instruction.
1730 llvm::BasicBlock *CaseRangeBlock = nullptr;
1731
1732 /// OpaqueLValues - Keeps track of the current set of opaque value
1733 /// expressions.
1734 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1735 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1736
1737 // VLASizeMap - This keeps track of the associated size for each VLA type.
1738 // We track this by the size expression rather than the type itself because
1739 // in certain situations, like a const qualifier applied to an VLA typedef,
1740 // multiple VLA types can share the same size expression.
1741 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1742 // enter/leave scopes.
1743 llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1744
1745 /// A block containing a single 'unreachable' instruction. Created
1746 /// lazily by getUnreachableBlock().
1747 llvm::BasicBlock *UnreachableBlock = nullptr;
1748
1749 /// Counts of the number return expressions in the function.
1750 unsigned NumReturnExprs = 0;
1751
1752 /// Count the number of simple (constant) return expressions in the function.
1753 unsigned NumSimpleReturnExprs = 0;
1754
1755 /// The last regular (non-return) debug location (breakpoint) in the function.
1756 SourceLocation LastStopPoint;
1757
1758public:
1759 /// Source location information about the default argument or member
1760 /// initializer expression we're evaluating, if any.
1764
1765 /// A scope within which we are constructing the fields of an object which
1766 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1767 /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1769 public:
1771 : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1772 CGF.CXXDefaultInitExprThis = This;
1773 }
1775 CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1776 }
1777
1778 private:
1779 CodeGenFunction &CGF;
1780 Address OldCXXDefaultInitExprThis;
1781 };
1782
1783 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1784 /// is overridden to be the object under construction.
1786 public:
1788 : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1789 OldCXXThisAlignment(CGF.CXXThisAlignment),
1791 CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
1792 CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1793 }
1795 CGF.CXXThisValue = OldCXXThisValue;
1796 CGF.CXXThisAlignment = OldCXXThisAlignment;
1797 }
1798
1799 public:
1801 llvm::Value *OldCXXThisValue;
1804 };
1805
1809 };
1810
1811 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1812 /// current loop index is overridden.
1814 public:
    /// Override the current array-initialization loop index with \p Index,
    /// remembering the previous value so the destructor can restore it.
    ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
        : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
      CGF.ArrayInitIndex = Index;
    }
1820 CGF.ArrayInitIndex = OldArrayInitIndex;
1821 }
1822
1823 private:
1824 CodeGenFunction &CGF;
1825 llvm::Value *OldArrayInitIndex;
1826 };
1827
1829 public:
1831 : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1832 OldCurCodeDecl(CGF.CurCodeDecl),
1833 OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1834 OldCXXABIThisValue(CGF.CXXABIThisValue),
1835 OldCXXThisValue(CGF.CXXThisValue),
1836 OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1837 OldCXXThisAlignment(CGF.CXXThisAlignment),
1838 OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1839 OldCXXInheritedCtorInitExprArgs(
1840 std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1841 CGF.CurGD = GD;
1842 CGF.CurFuncDecl = CGF.CurCodeDecl =
1843 cast<CXXConstructorDecl>(GD.getDecl());
1844 CGF.CXXABIThisDecl = nullptr;
1845 CGF.CXXABIThisValue = nullptr;
1846 CGF.CXXThisValue = nullptr;
1847 CGF.CXXABIThisAlignment = CharUnits();
1848 CGF.CXXThisAlignment = CharUnits();
1850 CGF.FnRetTy = QualType();
1851 CGF.CXXInheritedCtorInitExprArgs.clear();
1852 }
1854 CGF.CurGD = OldCurGD;
1855 CGF.CurFuncDecl = OldCurFuncDecl;
1856 CGF.CurCodeDecl = OldCurCodeDecl;
1857 CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1858 CGF.CXXABIThisValue = OldCXXABIThisValue;
1859 CGF.CXXThisValue = OldCXXThisValue;
1860 CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1861 CGF.CXXThisAlignment = OldCXXThisAlignment;
1862 CGF.ReturnValue = OldReturnValue;
1863 CGF.FnRetTy = OldFnRetTy;
1864 CGF.CXXInheritedCtorInitExprArgs =
1865 std::move(OldCXXInheritedCtorInitExprArgs);
1866 }
1867
1868 private:
1869 CodeGenFunction &CGF;
1870 GlobalDecl OldCurGD;
1871 const Decl *OldCurFuncDecl;
1872 const Decl *OldCurCodeDecl;
1873 ImplicitParamDecl *OldCXXABIThisDecl;
1874 llvm::Value *OldCXXABIThisValue;
1875 llvm::Value *OldCXXThisValue;
1876 CharUnits OldCXXABIThisAlignment;
1877 CharUnits OldCXXThisAlignment;
1878 Address OldReturnValue;
1879 QualType OldFnRetTy;
1880 CallArgList OldCXXInheritedCtorInitExprArgs;
1881 };
1882
1883 // Helper class for the OpenMP IR Builder. Allows reusability of code used for
  // region body, and finalization codegen callbacks. This class will also
1885 // contain privatization functions used by the privatization call backs
1886 //
1887 // TODO: this is temporary class for things that are being moved out of
1888 // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
1889 // utility function for use with the OMPBuilder. Once that move to use the
1890 // OMPBuilder is done, everything here will either become part of CodeGenFunc.
1891 // directly, or a new helper class that will contain functions used by both
1892 // this and the OMPBuilder
1893
1895
1899
1900 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1901
1902 /// Cleanup action for allocate support.
1904
1905 private:
1906 llvm::CallInst *RTLFnCI;
1907
1908 public:
      /// Takes a runtime call and detaches it from its current parent block;
      /// Emit() re-inserts it at the cleanup point.
      OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
        RLFnCI->removeFromParent();
      }
1912
      void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
        // Without a valid insertion point there is nowhere to place the
        // runtime call; emit nothing.
        if (!CGF.HaveInsertPoint())
          return;
        // Re-insert the runtime call that the constructor detached.
        CGF.Builder.Insert(RTLFnCI);
      }
1918 };
1919
1920 /// Returns address of the threadprivate variable for the current
1921 /// thread. This Also create any necessary OMP runtime calls.
1922 ///
1923 /// \param VD VarDecl for Threadprivate variable.
1924 /// \param VDAddr Address of the Vardecl
1925 /// \param Loc The location where the barrier directive was encountered
1927 const VarDecl *VD, Address VDAddr,
1928 SourceLocation Loc);
1929
1930 /// Gets the OpenMP-specific address of the local variable /p VD.
1932 const VarDecl *VD);
1933 /// Get the platform-specific name separator.
1934 /// \param Parts different parts of the final name that needs separation
1935 /// \param FirstSeparator First separator used between the initial two
1936 /// parts of the name.
    /// \param Separator separator used between all of the rest consecutive
1938 /// parts of the name
1939 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
1940 StringRef FirstSeparator = ".",
1941 StringRef Separator = ".");
1942 /// Emit the Finalization for an OMP region
1943 /// \param CGF The Codegen function this belongs to
1944 /// \param IP Insertion point for generating the finalization code.
1946 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1947 assert(IP.getBlock()->end() != IP.getPoint() &&
1948 "OpenMP IR Builder should cause terminated block!");
1949
1950 llvm::BasicBlock *IPBB = IP.getBlock();
1951 llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
1952 assert(DestBB && "Finalization block should have one successor!");
1953
1954 // erase and replace with cleanup branch.
1955 IPBB->getTerminator()->eraseFromParent();
1956 CGF.Builder.SetInsertPoint(IPBB);
1958 CGF.EmitBranchThroughCleanup(Dest);
1959 }
1960
1961 /// Emit the body of an OMP region
1962 /// \param CGF The Codegen function this belongs to
1963 /// \param RegionBodyStmt The body statement for the OpenMP region being
1964 /// generated
1965 /// \param AllocaIP Where to insert alloca instructions
1966 /// \param CodeGenIP Where to insert the region code
1967 /// \param RegionName Name to be used for new blocks
1969 const Stmt *RegionBodyStmt,
1970 InsertPointTy AllocaIP,
1971 InsertPointTy CodeGenIP,
1972 Twine RegionName);
1973
1974 static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
1975 llvm::BasicBlock &FiniBB, llvm::Function *Fn,
1977 llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
1978 if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
1979 CodeGenIPBBTI->eraseFromParent();
1980
1981 CGF.Builder.SetInsertPoint(CodeGenIPBB);
1982
1983 if (Fn->doesNotThrow())
1984 CGF.EmitNounwindRuntimeCall(Fn, Args);
1985 else
1986 CGF.EmitRuntimeCall(Fn, Args);
1987
1988 if (CGF.Builder.saveIP().isSet())
1989 CGF.Builder.CreateBr(&FiniBB);
1990 }
1991
1992 /// Emit the body of an OMP region that will be outlined in
1993 /// OpenMPIRBuilder::finalize().
1994 /// \param CGF The Codegen function this belongs to
1995 /// \param RegionBodyStmt The body statement for the OpenMP region being
1996 /// generated
1997 /// \param AllocaIP Where to insert alloca instructions
1998 /// \param CodeGenIP Where to insert the region code
1999 /// \param RegionName Name to be used for new blocks
2001 const Stmt *RegionBodyStmt,
2002 InsertPointTy AllocaIP,
2003 InsertPointTy CodeGenIP,
2004 Twine RegionName);
2005
2006 /// RAII for preserving necessary info during Outlined region body codegen.
2008
2009 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2010 CodeGenFunction::JumpDest OldReturnBlock;
2011 CodeGenFunction &CGF;
2012
2013 public:
2015 llvm::BasicBlock &RetBB)
2016 : CGF(cgf) {
2017 assert(AllocaIP.isSet() &&
2018 "Must specify Insertion point for allocas of outlined function");
2019 OldAllocaIP = CGF.AllocaInsertPt;
2020 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2021
2022 OldReturnBlock = CGF.ReturnBlock;
2023 CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
2024 }
2025
2027 CGF.AllocaInsertPt = OldAllocaIP;
2028 CGF.ReturnBlock = OldReturnBlock;
2029 }
2030 };
2031
2032 /// RAII for preserving necessary info during inlined region body codegen.
2034
2035 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2036 CodeGenFunction &CGF;
2037
2038 public:
2040 llvm::BasicBlock &FiniBB)
2041 : CGF(cgf) {
2042 // Alloca insertion block should be in the entry block of the containing
2043 // function so it expects an empty AllocaIP in which case will reuse the
2044 // old alloca insertion point, or a new AllocaIP in the same block as
2045 // the old one
2046 assert((!AllocaIP.isSet() ||
2047 CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
2048 "Insertion point should be in the entry block of containing "
2049 "function!");
2050 OldAllocaIP = CGF.AllocaInsertPt;
2051 if (AllocaIP.isSet())
2052 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2053
2054 // TODO: Remove the call, after making sure the counter is not used by
2055 // the EHStack.
2056 // Since this is an inlined region, it should not modify the
2057 // ReturnBlock, and should reuse the one for the enclosing outlined
2058 // region. So, the JumpDest being return by the function is discarded
2059 (void)CGF.getJumpDestInCurrentScope(&FiniBB);
2060 }
2061
2063 };
2064 };
2065
2066private:
2067 /// CXXThisDecl - When generating code for a C++ member function,
2068 /// this will hold the implicit 'this' declaration.
2069 ImplicitParamDecl *CXXABIThisDecl = nullptr;
2070 llvm::Value *CXXABIThisValue = nullptr;
2071 llvm::Value *CXXThisValue = nullptr;
2072 CharUnits CXXABIThisAlignment;
2073 CharUnits CXXThisAlignment;
2074
2075 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
2076 /// this expression.
2077 Address CXXDefaultInitExprThis = Address::invalid();
2078
2079 /// The current array initialization index when evaluating an
2080 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
2081 llvm::Value *ArrayInitIndex = nullptr;
2082
2083 /// The values of function arguments to use when evaluating
2084 /// CXXInheritedCtorInitExprs within this context.
2085 CallArgList CXXInheritedCtorInitExprArgs;
2086
2087 /// CXXStructorImplicitParamDecl - When generating code for a constructor or
2088 /// destructor, this will hold the implicit argument (e.g. VTT).
2089 ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
2090 llvm::Value *CXXStructorImplicitParamValue = nullptr;
2091
2092 /// OutermostConditional - Points to the outermost active
2093 /// conditional control. This is used so that we know if a
2094 /// temporary should be destroyed conditionally.
2095 ConditionalEvaluation *OutermostConditional = nullptr;
2096
2097 /// The current lexical scope.
2098 LexicalScope *CurLexicalScope = nullptr;
2099
2100 /// The current source location that should be used for exception
2101 /// handling code.
2102 SourceLocation CurEHLocation;
2103
2104 /// BlockByrefInfos - For each __block variable, contains
2105 /// information about the layout of the variable.
2106 llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
2107
2108 /// Used by -fsanitize=nullability-return to determine whether the return
2109 /// value can be checked.
2110 llvm::Value *RetValNullabilityPrecondition = nullptr;
2111
2112 /// Check if -fsanitize=nullability-return instrumentation is required for
2113 /// this function.
2114 bool requiresReturnValueNullabilityCheck() const {
2115 return RetValNullabilityPrecondition;
2116 }
2117
2118 /// Used to store precise source locations for return statements by the
2119 /// runtime return value checks.
2120 Address ReturnLocation = Address::invalid();
2121
2122 /// Check if the return value of this function requires sanitization.
2123 bool requiresReturnValueCheck() const;
2124
2125 bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
2126 bool hasInAllocaArg(const CXXMethodDecl *MD);
2127
2128 llvm::BasicBlock *TerminateLandingPad = nullptr;
2129 llvm::BasicBlock *TerminateHandler = nullptr;
2131
2132 /// Terminate funclets keyed by parent funclet pad.
2133 llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
2134
2135 /// Largest vector width used in ths function. Will be used to create a
2136 /// function attribute.
2137 unsigned LargestVectorWidth = 0;
2138
2139 /// True if we need emit the life-time markers. This is initially set in
2140 /// the constructor, but could be overwritten to true if this is a coroutine.
2141 bool ShouldEmitLifetimeMarkers;
2142
2143 /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
2144 /// the function metadata.
2145 void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
2146
2147public:
2148 CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
2150
  /// Return the module-wide CodeGenTypes (AST type -> LLVM type) converter.
  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
  /// Return the ASTContext of the module being emitted.
  ASTContext &getContext() const { return CGM.getContext(); }
2154 if (DisableDebugInfo)
2155 return nullptr;
2156 return DebugInfo;
2157 }
  /// Suppress debug-info emission: while set, the debug-info accessor
  /// returns null even if the module has a CGDebugInfo instance.
  void disableDebugInfo() { DisableDebugInfo = true; }
  /// Re-enable debug-info emission after a disableDebugInfo().
  void enableDebugInfo() { DisableDebugInfo = false; }
2160
2162 return CGM.getCodeGenOpts().OptimizationLevel == 0;
2163 }
2164
2165 const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
2166
2167 /// Returns a pointer to the function's exception object and selector slot,
2168 /// which is assigned in every landing pad.
2171
2172 /// Returns the contents of the function's exception object and selector
2173 /// slots.
2174 llvm::Value *getExceptionFromSlot();
2175 llvm::Value *getSelectorFromSlot();
2176
2178
2179 llvm::BasicBlock *getUnreachableBlock() {
2180 if (!UnreachableBlock) {
2181 UnreachableBlock = createBasicBlock("unreachable");
2182 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
2183 }
2184 return UnreachableBlock;
2185 }
2186
2187 llvm::BasicBlock *getInvokeDest() {
2188 if (!EHStack.requiresLandingPad()) return nullptr;
2189 return getInvokeDestImpl();
2190 }
2191
2192 bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
2193
  /// Return the target information for the translation unit.
  const TargetInfo &getTarget() const { return Target; }
  /// Return the LLVMContext that all IR for this function is created in.
  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
2197 return CGM.getTargetCodeGenInfo();
2198 }
2199
2200 //===--------------------------------------------------------------------===//
2201 // Cleanups
2202 //===--------------------------------------------------------------------===//
2203
2204 typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
2205
2206 void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2207 Address arrayEndPointer,
2208 QualType elementType,
2209 CharUnits elementAlignment,
2210 Destroyer *destroyer);
2211 void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2212 llvm::Value *arrayEnd,
2213 QualType elementType,
2214 CharUnits elementAlignment,
2215 Destroyer *destroyer);
2216
2218 Address addr, QualType type);
2220 Address addr, QualType type);
2222 Destroyer *destroyer, bool useEHCleanupForArray);
2224 Address addr, QualType type);
2226 QualType type, Destroyer *destroyer,
2227 bool useEHCleanupForArray);
2229 QualType type, Destroyer *destroyer,
2230 bool useEHCleanupForArray);
2231 void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
2232 llvm::Value *CompletePtr,
2233 QualType ElementType);
2236 std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
2238 bool useEHCleanupForArray);
2240 Destroyer *destroyer,
2241 bool useEHCleanupForArray,
2242 const VarDecl *VD);
2243 void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
2244 QualType elementType, CharUnits elementAlign,
2245 Destroyer *destroyer,
2246 bool checkZeroLength, bool useEHCleanup);
2247
2249
2250 /// Determines whether an EH cleanup is required to destroy a type
2251 /// with the given destruction kind.
2253 switch (kind) {
2254 case QualType::DK_none:
2255 return false;
2259 return getLangOpts().Exceptions;
2261 return getLangOpts().Exceptions &&
2262 CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
2263 }
2264 llvm_unreachable("bad destruction kind");
2265 }
2266
2269 }
2270
2271 //===--------------------------------------------------------------------===//
2272 // Objective-C
2273 //===--------------------------------------------------------------------===//
2274
2276
2278
2279 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
2281 const ObjCPropertyImplDecl *PID);
2283 const ObjCPropertyImplDecl *propImpl,
2284 const ObjCMethodDecl *GetterMothodDecl,
2285 llvm::Constant *AtomicHelperFn);
2286
2288 ObjCMethodDecl *MD, bool ctor);
2289
2290 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
2291 /// for the given property.
2293 const ObjCPropertyImplDecl *PID);
2295 const ObjCPropertyImplDecl *propImpl,
2296 llvm::Constant *AtomicHelperFn);
2297
2298 //===--------------------------------------------------------------------===//
2299 // Block Bits
2300 //===--------------------------------------------------------------------===//
2301
2302 /// Emit block literal.
2303 /// \return an LLVM value which is a pointer to a struct which contains
2304 /// information about the block, including the block invoke function, the
2305 /// captured variables, etc.
2306 llvm::Value *EmitBlockLiteral(const BlockExpr *);
2307
2309 const CGBlockInfo &Info,
2310 const DeclMapTy &ldm,
2311 bool IsLambdaConversionToBlock,
2312 bool BuildGlobalBlock);
2313
2314 /// Check if \p T is a C++ class that has a destructor that can throw.
2316
2317 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
2318 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
2320 const ObjCPropertyImplDecl *PID);
2322 const ObjCPropertyImplDecl *PID);
2323 llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
2324
2325 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
2326 bool CanThrow);
2327
2328 class AutoVarEmission;
2329
2331
2332 /// Enter a cleanup to destroy a __block variable. Note that this
2333 /// cleanup should be a no-op if the variable hasn't left the stack
2334 /// yet; if a cleanup is required for the variable itself, that needs
2335 /// to be done externally.
2336 ///
2337 /// \param Kind Cleanup kind.
2338 ///
2339 /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
2340 /// structure that will be passed to _Block_object_dispose. When
2341 /// \p LoadBlockVarAddr is true, the address of the field of the block
2342 /// structure that holds the address of the __block structure.
2343 ///
2344 /// \param Flags The flag that will be passed to _Block_object_dispose.
2345 ///
2346 /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
2347 /// \p Addr to get the address of the __block structure.
2349 bool LoadBlockVarAddr, bool CanThrow);
2350
2351 void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
2352 llvm::Value *ptr);
2353
2356
2357 /// BuildBlockByrefAddress - Computes the location of the
2358 /// data in a variable which is declared as __block.
2360 bool followForward = true);
2362 const BlockByrefInfo &info,
2363 bool followForward,
2364 const llvm::Twine &name);
2365
2367
2369
2370 void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
2371 const CGFunctionInfo &FnInfo);
2372
2373 /// Annotate the function with an attribute that disables TSan checking at
2374 /// runtime.
2375 void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
2376
2377 /// Emit code for the start of a function.
2378 /// \param Loc The location to be associated with the function.
2379 /// \param StartLoc The location of the function body.
2381 QualType RetTy,
2382 llvm::Function *Fn,
2383 const CGFunctionInfo &FnInfo,
2384 const FunctionArgList &Args,
2386 SourceLocation StartLoc = SourceLocation());
2387
2389
2393 void EmitFunctionBody(const Stmt *Body);
2394 void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
2395
2396 void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
2397 CallArgList &CallArgs,
2398 const CGFunctionInfo *CallOpFnInfo = nullptr,
2399 llvm::Constant *CallOpFn = nullptr);
2403 CallArgList &CallArgs);
2405 const CGFunctionInfo **ImplFnInfo,
2406 llvm::Function **ImplFn);
2409 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2410 }
2411 void EmitAsanPrologueOrEpilogue(bool Prologue);
2412
2413 /// Emit the unified return block, trying to avoid its emission when
2414 /// possible.
2415 /// \return The debug location of the user written return statement if the
2416 /// return block is avoided.
2417 llvm::DebugLoc EmitReturnBlock();
2418
2419 /// FinishFunction - Complete IR generation of the current function. It is
2420 /// legal to call this function even if there is no current insertion point.
2422
2423 void StartThunk(llvm::Function *Fn, GlobalDecl GD,
2424 const CGFunctionInfo &FnInfo, bool IsUnprototyped);
2425
2426 void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
2427 const ThunkInfo *Thunk, bool IsUnprototyped);
2428
2430
2431 /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
2432 void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
2433 llvm::FunctionCallee Callee);
2434
2435 /// Generate a thunk for the given method.
2436 void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
2437 GlobalDecl GD, const ThunkInfo &Thunk,
2438 bool IsUnprototyped);
2439
2440 llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
2441 const CGFunctionInfo &FnInfo,
2442 GlobalDecl GD, const ThunkInfo &Thunk);
2443
2445 FunctionArgList &Args);
2446
2448
2449 /// Struct with all information about dynamic [sub]class needed to set vptr.
2450 struct VPtr {
2455 };
2456
2457 /// Initialize the vtable pointer of the given subobject.
2459
2461
2464
2466 CharUnits OffsetFromNearestVBase,
2467 bool BaseIsNonVirtualPrimaryBase,
2468 const CXXRecordDecl *VTableClass,
2469 VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
2470
2472
2473 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
2474 /// to by This.
2475 llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy,
2476 const CXXRecordDecl *VTableClass);
2477
2486 };
2487
2488 /// Derived is the presumed address of an object of type T after a
2489 /// cast. If T is a polymorphic class type, emit a check that the virtual
2490 /// table for Derived belongs to a class derived from T.
2491 void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
2493
2494 /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
2495 /// If vptr CFI is enabled, emit a check that VTable is valid.
2496 void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
2498
2499 /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
2500 /// RD using llvm.type.test.
2501 void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
2503
2504 /// If whole-program virtual table optimization is enabled, emit an assumption
2505 /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
2506 /// enabled, emit a check that VTable is a member of RD's type identifier.
2508 llvm::Value *VTable, SourceLocation Loc);
2509
2510 /// Returns whether we should perform a type checked load when loading a
2511 /// virtual function for virtual calls to members of RD. This is generally
2512 /// true when both vcall CFI and whole-program-vtables are enabled.
2514
2515 /// Emit a type checked load from the given vtable.
2517 llvm::Value *VTable,
2518 llvm::Type *VTableTy,
2519 uint64_t VTableByteOffset);
2520
2521 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
2522 /// given phase of destruction for a destructor. The end result
2523 /// should call destructors on members and base classes in reverse
2524 /// order of their construction.
2526
2527 /// ShouldInstrumentFunction - Return true if the current function should be
2528 /// instrumented with __cyg_profile_func_* calls
2530
2531 /// ShouldSkipSanitizerInstrumentation - Return true if the current function
2532 /// should not be instrumented with sanitizers.
2534
2535 /// ShouldXRayInstrument - Return true if the current function should be
2536 /// instrumented with XRay nop sleds.
2538
2539 /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
2540 /// XRay custom event handling calls.
2542
2543 /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
2544 /// XRay typed event handling calls.
2546
2547 /// Return a type hash constant for a function instrumented by
2548 /// -fsanitize=function.
2549 llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;
2550
2551 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
2552 /// arguments for the given function. This is also responsible for naming the
2553 /// LLVM function arguments.
2555 llvm::Function *Fn,
2556 const FunctionArgList &Args);
2557
2558 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
2559 /// given temporary.
2560 void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
2561 SourceLocation EndLoc);
2562
2563 /// Emit a test that checks if the return value \p RV is nonnull.
2564 void EmitReturnValueCheck(llvm::Value *RV);
2565
2566 /// EmitStartEHSpec - Emit the start of the exception spec.
2567 void EmitStartEHSpec(const Decl *D);
2568
2569 /// EmitEndEHSpec - Emit the end of the exception spec.
2570 void EmitEndEHSpec(const Decl *D);
2571
2572 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2573 llvm::BasicBlock *getTerminateLandingPad();
2574
2575 /// getTerminateLandingPad - Return a cleanup funclet that just calls
2576 /// terminate.
2577 llvm::BasicBlock *getTerminateFunclet();
2578
2579 /// getTerminateHandler - Return a handler (not a landing pad, just
2580 /// a catch handler) that just calls terminate. This is used when
2581 /// a terminate scope encloses a try.
2582 llvm::BasicBlock *getTerminateHandler();
2583
2585 llvm::Type *ConvertType(QualType T);
2586 llvm::Type *ConvertType(const TypeDecl *T) {
2587 return ConvertType(getContext().getTypeDeclType(T));
2588 }
2589
2590 /// LoadObjCSelf - Load the value of self. This function is only valid while
2591 /// generating code for an Objective-C method.
2592 llvm::Value *LoadObjCSelf();
2593
2594 /// TypeOfSelfObject - Return type of object that this self represents.
2596
2597 /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2599
2601 return getEvaluationKind(T) == TEK_Scalar;
2602 }
2603
2606 }
2607
  /// createBasicBlock - Create an LLVM basic block.
  ///
  /// \param name   Optional name for the new block.
  /// \param parent Function to insert the block into; if null, the block is
  ///               created detached and must be inserted by the caller.
  /// \param before When non-null, insert the new block before this block
  ///               rather than at the end of \p parent.
  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
                                     llvm::Function *parent = nullptr,
                                     llvm::BasicBlock *before = nullptr) {
    return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
  }
2614
2615 /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
2616 /// label maps to.
2618
2619 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2620 /// another basic block, simplify it. This assumes that no other code could
2621 /// potentially reference the basic block.
2622 void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2623
2624 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2625 /// adding a fall-through branch from the current insert block if
2626 /// necessary. It is legal to call this function even if there is no current
2627 /// insertion point.
2628 ///
2629 /// IsFinished - If true, indicates that the caller has finished emitting
2630 /// branches to the given block and does not expect to emit code into it. This
2631 /// means the block can be ignored if it is unreachable.
2632 void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
2633
2634 /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2635 /// near its uses, and leave the insertion point in it.
2636 void EmitBlockAfterUses(llvm::BasicBlock *BB);
2637
2638 /// EmitBranch - Emit a branch to the specified basic block from the current
2639 /// insert block, taking care to avoid creation of branches from dummy
2640 /// blocks. It is legal to call this function even if there is no current
2641 /// insertion point.
2642 ///
2643 /// This function clears the current insertion point. The caller should follow
2644 /// calls to this function with calls to Emit*Block prior to generation new
2645 /// code.
2646 void EmitBranch(llvm::BasicBlock *Block);
2647
  /// HaveInsertPoint - True if an insertion point is defined. If not, this
  /// indicates that the current code being emitted is unreachable: the
  /// IRBuilder has no block to append instructions to.
  bool HaveInsertPoint() const {
    return Builder.GetInsertBlock() != nullptr;
  }
2653
2654 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2655 /// emitted IR has a place to go. Note that by definition, if this function
2656 /// creates a block then that block is unreachable; callers may do better to
2657 /// detect when no insertion point is defined and simply skip IR generation.
2659 if (!HaveInsertPoint())
2661 }
2662
2663 /// ErrorUnsupported - Print out an error that codegen doesn't support the
2664 /// specified stmt yet.
2665 void ErrorUnsupported(const Stmt *S, const char *Type);
2666
2667 //===--------------------------------------------------------------------===//
2668 // Helpers
2669 //===--------------------------------------------------------------------===//
2670
2672 llvm::BasicBlock *LHSBlock,
2673 llvm::BasicBlock *RHSBlock,
2674 llvm::BasicBlock *MergeBlock,
2675 QualType MergedType) {
2676 Builder.SetInsertPoint(MergeBlock);
2677 llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
2678 PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
2679 PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
2680 LHS.replaceBasePointer(PtrPhi);
2681 LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
2682 return LHS;
2683 }
2684
2685 /// Construct an address with the natural alignment of T. If a pointer to T
2686 /// is expected to be signed, the pointer passed to this function must have
2687 /// been signed, and the returned Address will have the pointer authentication
2688 /// information needed to authenticate the signed pointer.
2690 llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
2691 bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
2692 TBAAAccessInfo *TBAAInfo = nullptr,
2693 KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
2694 if (Alignment.isZero())
2695 Alignment =
2696 CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
2697 return Address(Ptr, ConvertTypeForMem(T), Alignment, nullptr,
2698 IsKnownNonNull);
2699 }
2700
2703 return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
2705 }
2706
2708 TBAAAccessInfo TBAAInfo) {
2709 return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2710 }
2711
2712 LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2714 return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
2716 }
2717
2718 /// Same as MakeAddrLValue above except that the pointer is known to be
2719 /// unsigned.
2720 LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2722 Address Addr(V, ConvertTypeForMem(T), Alignment);
2723 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2725 }
2726
2727 LValue
2730 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2731 TBAAAccessInfo());
2732 }
2733
2734 /// Given a value of type T* that may not be to a complete object, construct
2735 /// an l-value with the natural pointee alignment of T.
2737
2739
2740 /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
2741 /// to be unsigned.
2743
2745
2747 LValueBaseInfo *PointeeBaseInfo = nullptr,
2748 TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2751 AlignmentSource Source =
2753 LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2754 CGM.getTBAAAccessInfo(RefTy));
2755 return EmitLoadOfReferenceLValue(RefLVal);
2756 }
2757
2758 /// Load a pointer with type \p PtrTy stored at address \p Ptr.
2759 /// Note that \p PtrTy is the type of the loaded pointer, not the addresses
2760 /// it is loaded from.
2762 LValueBaseInfo *BaseInfo = nullptr,
2763 TBAAAccessInfo *TBAAInfo = nullptr);
2765
2766private:
2767 struct AllocaTracker {
    /// Record an alloca created while this tracker is active.
    void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
    /// Transfer the collected allocas to the caller, emptying the tracker.
    llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }
2770
2771 private:
2773 };
2774 AllocaTracker *Allocas = nullptr;
2775
2776public:
2777 // Captures all the allocas created during the scope of its RAII object.
2780 : CGF(CGF), OldTracker(CGF.Allocas) {
2781 CGF.Allocas = &Tracker;
2782 }
2783 ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }
2784
2785 llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }
2786
2787 private:
2788 CodeGenFunction &CGF;
2789 AllocaTracker *OldTracker;
2790 AllocaTracker Tracker;
2791 };
2792
2793 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2794 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2795 /// insertion point of the builder. The caller is responsible for setting an
2796 /// appropriate alignment on
2797 /// the alloca.
2798 ///
2799 /// \p ArraySize is the number of array elements to be allocated if it
2800 /// is not nullptr.
2801 ///
2802 /// LangAS::Default is the address space of pointers to local variables and
2803 /// temporaries, as exposed in the source language. In certain
2804 /// configurations, this is not the same as the alloca address space, and a
2805 /// cast is needed to lift the pointer from the alloca AS into
2806 /// LangAS::Default. This can happen when the target uses a restricted
2807 /// address space for the stack but the source language requires
2808 /// LangAS::Default to be a generic address space. The latter condition is
2809 /// common for most programming languages; OpenCL is an exception in that
2810 /// LangAS::Default is the private address space, which naturally maps
2811 /// to the stack.
2812 ///
2813 /// Because the address of a temporary is often exposed to the program in
2814 /// various ways, this function will perform the cast. The original alloca
2815 /// instruction is returned through \p Alloca if it is not nullptr.
2816 ///
2817 /// The cast is not performaed in CreateTempAllocaWithoutCast. This is
2818 /// more efficient if the caller knows that the address will not be exposed.
2819 llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2820 llvm::Value *ArraySize = nullptr);
2822 const Twine &Name = "tmp",
2823 llvm::Value *ArraySize = nullptr,
2824 RawAddress *Alloca = nullptr);
2826 const Twine &Name = "tmp",
2827 llvm::Value *ArraySize = nullptr);
2828
2829 /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2830 /// default ABI alignment of the given LLVM type.
2831 ///
2832 /// IMPORTANT NOTE: This is *not* generally the right alignment for
2833 /// any given AST type that happens to have been lowered to the
2834 /// given IR type. This should only ever be used for function-local,
2835 /// IR-driven manipulations like saving and restoring a value. Do
2836 /// not hand this address off to arbitrary IRGen routines, and especially
2837 /// do not pass it as an argument to a function that might expect a
2838 /// properly ABI-aligned value.
2840 const Twine &Name = "tmp");
2841
2842 /// CreateIRTemp - Create a temporary IR object of the given type, with
2843 /// appropriate alignment. This routine should only be used when an temporary
2844 /// value needs to be stored into an alloca (for example, to avoid explicit
2845 /// PHI construction), but the type is the IR type, not the type appropriate
2846 /// for storing in memory.
2847 ///
2848 /// That is, this is exactly equivalent to CreateMemTemp, but calling
2849 /// ConvertType instead of ConvertTypeForMem.
2850 RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
2851
2852 /// CreateMemTemp - Create a temporary memory object of the given type, with
2853 /// appropriate alignmen and cast it to the default address space. Returns
2854 /// the original alloca instruction by \p Alloca if it is not nullptr.
2855 RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
2856 RawAddress *Alloca = nullptr);
2858 const Twine &Name = "tmp",
2859 RawAddress *Alloca = nullptr);
2860
2861 /// CreateMemTemp - Create a temporary memory object of the given type, with
2862 /// appropriate alignmen without casting it to the default address space.
2863 RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2865 const Twine &Name = "tmp");
2866
2867 /// CreateAggTemp - Create a temporary memory object for the given
2868 /// aggregate type.
2869 AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
2870 RawAddress *Alloca = nullptr) {
2871 return AggValueSlot::forAddr(
2872 CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
2875 }
2876
2877 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2878 /// expression and compare the result against zero, returning an Int1Ty value.
2879 llvm::Value *EvaluateExprAsBool(const Expr *E);
2880
2881 /// Retrieve the implicit cast expression of the rhs in a binary operator
2882 /// expression by passing pointers to Value and QualType
2883 /// This is used for implicit bitfield conversion checks, which
2884 /// must compare with the value before potential truncation.
2886 llvm::Value **Previous,
2887 QualType *SrcType);
2888
2889 /// Emit a check that an [implicit] conversion of a bitfield. It is not UB,
2890 /// so we use the value after conversion.
2891 void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
2892 llvm::Value *Dst, QualType DstType,
2893 const CGBitFieldInfo &Info,
2894 SourceLocation Loc);
2895
2896 /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2897 void EmitIgnoredExpr(const Expr *E);
2898
2899 /// EmitAnyExpr - Emit code to compute the specified expression which can have
2900 /// any type. The result is returned as an RValue struct. If this is an
2901 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2902 /// the result should be returned.
2903 ///
2904 /// \param ignoreResult True if the resulting value isn't used.
2907 bool ignoreResult = false);
2908
2909 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2910 // or the value of the expression, depending on how va_list is defined.
2912
2913 /// Emit a "reference" to a __builtin_ms_va_list; this is
2914 /// always the value of the expression, because a __builtin_ms_va_list is a
2915 /// pointer to a char.
2917
2918 /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
2919 /// always be accessible even if no aggregate location is provided.
2921
2922 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2923 /// arbitrary expression into the given memory location.
2924 void EmitAnyExprToMem(const Expr *E, Address Location,
2925 Qualifiers Quals, bool IsInitializer);
2926
2927 void EmitAnyExprToExn(const Expr *E, Address Addr);
2928
2929 /// EmitExprAsInit - Emits the code necessary to initialize a
2930 /// location in memory with the given initializer.
2931 void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2932 bool capturedByInit);
2933
2934 /// hasVolatileMember - returns true if aggregate type has a volatile
2935 /// member.
2937 if (const RecordType *RT = T->getAs<RecordType>()) {
2938 const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
2939 return RD->hasVolatileMember();
2940 }
2941 return false;
2942 }
2943
2944 /// Determine whether a return value slot may overlap some other object.
2946 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2947 // class subobjects. These cases may need to be revisited depending on the
2948 // resolution of the relevant core issue.
2950 }
2951
2952 /// Determine whether a field initialization may overlap some other object.
2954
2955 /// Determine whether a base class initialization may overlap some other
2956 /// object.
2958 const CXXRecordDecl *BaseRD,
2959 bool IsVirtual);
2960
2961 /// Emit an aggregate assignment.
2963 bool IsVolatile = hasVolatileMember(EltTy);
2964 EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
2965 }
2966
2968 AggValueSlot::Overlap_t MayOverlap) {
2969 EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
2970 }
2971
2972 /// EmitAggregateCopy - Emit an aggregate copy.
2973 ///
2974 /// \param isVolatile \c true iff either the source or the destination is
2975 /// volatile.
2976 /// \param MayOverlap Whether the tail padding of the destination might be
2977 /// occupied by some other object. More efficient code can often be
2978 /// generated if not.
2980 AggValueSlot::Overlap_t MayOverlap,
2981 bool isVolatile = false);
2982
2983 /// GetAddrOfLocalVar - Return the address of a local variable.
2985 auto it = LocalDeclMap.find(VD);
2986 assert(it != LocalDeclMap.end() &&
2987 "Invalid argument to GetAddrOfLocalVar(), no decl!");
2988 return it->second;
2989 }
2990
2991 /// Given an opaque value expression, return its LValue mapping if it exists,
2992 /// otherwise create one.
2994
2995 /// Given an opaque value expression, return its RValue mapping if it exists,
2996 /// otherwise create one.
2998
2999 /// Get the index of the current ArrayInitLoopExpr, if any.
3000 llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
3001
3002 /// getAccessedFieldNo - Given an encoded value and a result number, return
3003 /// the input field number being accessed.
3004 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
3005
3006 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
3007 llvm::BasicBlock *GetIndirectGotoBlock();
3008
3009 /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
3010 static bool IsWrappedCXXThis(const Expr *E);
3011
3012 /// EmitNullInitialization - Generate code to set a value of the given type to
3013 /// null, If the type contains data member pointers, they will be initialized
3014 /// to -1 in accordance with the Itanium C++ ABI.
3016
3017 /// Emits a call to an LLVM variable-argument intrinsic, either
3018 /// \c llvm.va_start or \c llvm.va_end.
3019 /// \param ArgValue A reference to the \c va_list as emitted by either
3020 /// \c EmitVAListRef or \c EmitMSVAListRef.
3021 /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
3022 /// calls \c llvm.va_end.
3023 llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
3024
3025 /// Generate code to get an argument from the passed in pointer
3026 /// and update it accordingly.
3027 /// \param VE The \c VAArgExpr for which to generate code.
3028 /// \param VAListAddr Receives a reference to the \c va_list as emitted by
3029 /// either \c EmitVAListRef or \c EmitMSVAListRef.
3030 /// \returns A pointer to the argument.
3031 // FIXME: We should be able to get rid of this method and use the va_arg
3032 // instruction in LLVM instead once it works well enough.
3034
3035 /// emitArrayLength - Compute the length of an array, even if it's a
3036 /// VLA, and drill down to the base element type.
3038 QualType &baseType,
3039 Address &addr);
3040
3041 /// EmitVLASize - Capture all the sizes for the VLA expressions in
3042 /// the given variably-modified type and store them in the VLASizeMap.
3043 ///
3044 /// This function can be called with a null (unreachable) insert point.
3046
3048 llvm::Value *NumElts;
3050
3051 VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
3052 };
3053
3054 /// Return the number of elements for a single dimension
3055 /// for the given array type.
3058
3059 /// Returns an LLVM value that corresponds to the size,
3060 /// in non-variably-sized elements, of a variable length array type,
3061 /// plus that largest non-variably-sized element type. Assumes that
3062 /// the type has already been emitted with EmitVariablyModifiedType.
3065
3066 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
3067 /// generating code for an C++ member function.
3068 llvm::Value *LoadCXXThis() {
3069 assert(CXXThisValue && "no 'this' value for this function");
3070 return CXXThisValue;
3071 }
3073
3074 /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors have
3075 /// virtual bases.
3076 // FIXME: Every place that calls LoadCXXVTT is something
3077 // that needs to be abstracted properly.
3078 llvm::Value *LoadCXXVTT() {
3079 assert(CXXStructorImplicitParamValue && "no VTT value for this function");
3080 return CXXStructorImplicitParamValue;
3081 }
3082
3083 /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
3084 /// complete class to the given direct base.
3085 Address
3087 const CXXRecordDecl *Derived,
3088 const CXXRecordDecl *Base,
3089 bool BaseIsVirtual);
3090
3091 static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
3092
3093 /// GetAddressOfBaseClass - This function will add the necessary delta to the
3094 /// load of 'this' and returns address of the base class.
3096 const CXXRecordDecl *Derived,
3099 bool NullCheckValue, SourceLocation Loc);
3100
3102 const CXXRecordDecl *Derived,
3105 bool NullCheckValue);
3106
3107 /// GetVTTParameter - Return the VTT parameter that should be passed to a
3108 /// base constructor/destructor with virtual bases.
3109 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
3110 /// to ItaniumCXXABI.cpp together with all the references to VTT.
3111 llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
3112 bool Delegating);
3113
3115 CXXCtorType CtorType,
3116 const FunctionArgList &Args,
3117 SourceLocation Loc);
3118 // It's important not to confuse this and the previous function. Delegating
3119 // constructors are the C++0x feature. The constructor delegate optimization
3120 // is used to reduce duplication in the base and complete constructors where
3121 // they are substantially the same.
3123 const FunctionArgList &Args);
3124
3125 /// Emit a call to an inheriting constructor (that is, one that invokes a
3126 /// constructor inherited from a base class) by inlining its definition. This
3127 /// is necessary if the ABI does not support forwarding the arguments to the
3128 /// base class constructor (because they're variadic or similar).
3130 CXXCtorType CtorType,
3131 bool ForVirtualBase,
3132 bool Delegating,
3133 CallArgList &Args);
3134
3135 /// Emit a call to a constructor inherited from a base class, passing the
3136 /// current constructor's arguments along unmodified (without even making
3137 /// a copy).
3139 bool ForVirtualBase, Address This,
3140 bool InheritedFromVBase,
3141 const CXXInheritedCtorInitExpr *E);
3142
3144 bool ForVirtualBase, bool Delegating,
3145 AggValueSlot ThisAVS, const CXXConstructExpr *E);
3146
3148 bool ForVirtualBase, bool Delegating,
3149 Address This, CallArgList &Args,
3151 SourceLocation Loc, bool NewPointerIsChecked);
3152
3153 /// Emit assumption load for all bases. Requires to be called only on
3154 /// most-derived class and not under construction of the object.
3156
3157 /// Emit assumption that vptr load == global vtable.
3158 void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
3159
3161 Address This, Address Src,
3162 const CXXConstructExpr *E);
3163
3165 const ArrayType *ArrayTy,
3166 Address ArrayPtr,
3167 const CXXConstructExpr *E,
3168 bool NewPointerIsChecked,
3169 bool ZeroInitialization = false);
3170
3172 llvm::Value *NumElements,
3173 Address ArrayPtr,
3174 const CXXConstructExpr *E,
3175 bool NewPointerIsChecked,
3176 bool ZeroInitialization = false);
3177
3179
3181 bool ForVirtualBase, bool Delegating, Address This,
3182 QualType ThisTy);
3183
3184 void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
3185 llvm::Type *ElementTy, Address NewPtr,
3186 llvm::Value *NumElements,
3187 llvm::Value *AllocSizeWithoutCookie);
3188
3189 void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
3190 Address Ptr);
3191
3196
3197 llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
3198 void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
3199
3200 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
3202
3203 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
3204 QualType DeleteTy, llvm::Value *NumElements = nullptr,
3205 CharUnits CookieSize = CharUnits());
3206
3208 const CallExpr *TheCallExpr, bool IsDelete);
3209
3210 llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
3211 llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
3213
3214 /// Situations in which we might emit a check for the suitability of a
3215 /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
3216 /// compiler-rt.
3218 /// Checking the operand of a load. Must be suitably sized and aligned.
3220 /// Checking the destination of a store. Must be suitably sized and aligned.
3222 /// Checking the bound value in a reference binding. Must be suitably sized
3223 /// and aligned, but is not required to refer to an object (until the
3224 /// reference is used), per core issue 453.
3226 /// Checking the object expression in a non-static data member access. Must
3227 /// be an object within its lifetime.
3229 /// Checking the 'this' pointer for a call to a non-static member function.
3230 /// Must be an object within its lifetime.
3232 /// Checking the 'this' pointer for a constructor call.
3234 /// Checking the operand of a static_cast to a derived pointer type. Must be
3235 /// null or an object within its lifetime.
3237 /// Checking the operand of a static_cast to a derived reference type. Must
3238 /// be an object within its lifetime.
3240 /// Checking the operand of a cast to a base object. Must be suitably sized
3241 /// and aligned.
3243 /// Checking the operand of a cast to a virtual base object. Must be an
3244 /// object within its lifetime.
3246 /// Checking the value assigned to a _Nonnull pointer. Must not be null.
3248 /// Checking the operand of a dynamic_cast or a typeid expression. Must be
3249 /// null or an object within its lifetime.
3252
3253 /// Determine whether the pointer type check \p TCK permits null pointers.
3255
3256 /// Determine whether the pointer type check \p TCK requires a vptr check.
3258
3259 /// Whether any type-checking sanitizers are enabled. If \c false,
3260 /// calls to EmitTypeCheck can be skipped.
3262
3264 QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
3265 llvm::Value *ArraySize = nullptr) {
3267 return;
3268 EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
3269 SkippedChecks, ArraySize);
3270 }
3271
3273 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3274 SanitizerSet SkippedChecks = SanitizerSet(),
3275 llvm::Value *ArraySize = nullptr) {
3277 return;
3278 EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
3279 SkippedChecks, ArraySize);
3280 }
3281
3282 /// Emit a check that \p V is the address of storage of the
3283 /// appropriate size and alignment for an object of type \p Type
3284 /// (or if ArraySize is provided, for an array of that bound).
3285 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
3286 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3287 SanitizerSet SkippedChecks = SanitizerSet(),
3288 llvm::Value *ArraySize = nullptr);
3289
3290 /// Emit a check that \p Base points into an array object, which
3291 /// we can access at index \p Index. \p Accessed should be \c false if
3292 /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
3293 void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
3294 QualType IndexType, bool Accessed);
3295 void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
3296 llvm::Value *Index, QualType IndexType,
3297 QualType IndexedType, bool Accessed);
3298
3299 // Find a struct's flexible array member. It may be embedded inside multiple
3300 // sub-structs, but must still be the last field.
3302 const RecordDecl *RD,
3303 StringRef Name,
3304 uint64_t &Offset);
3305
3306 /// Find the FieldDecl specified in a FAM's "counted_by" attribute. Returns
3307 /// \p nullptr if either the attribute or the field doesn't exist.
3309
3310 /// Build an expression accessing the "counted_by" field.
3311 llvm::Value *EmitCountedByFieldExpr(const Expr *Base,
3312 const FieldDecl *FAMDecl,
3313 const FieldDecl *CountDecl);
3314
3316 bool isInc, bool isPre);
3318 bool isInc, bool isPre);
3319
3320 /// Converts Location to a DebugLoc, if debug information is enabled.
3321 llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
3322
3323 /// Get the record field index as represented in debug info.
3324 unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);
3325
3326
3327 //===--------------------------------------------------------------------===//
3328 // Declaration Emission
3329 //===--------------------------------------------------------------------===//
3330
3331 /// EmitDecl - Emit a declaration.
3332 ///
3333 /// This function can be called with a null (unreachable) insert point.
3334 void EmitDecl(const Decl &D);
3335
3336 /// EmitVarDecl - Emit a local variable declaration.
3337 ///
3338 /// This function can be called with a null (unreachable) insert point.
3339 void EmitVarDecl(const VarDecl &D);
3340
3341 void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
3342 bool capturedByInit);
3343
3345 llvm::Value *Address);
3346
3347 /// Determine whether the given initializer is trivial in the sense
3348 /// that it requires no code to be generated.
3350
3351 /// EmitAutoVarDecl - Emit an auto variable declaration.
3352 ///
3353 /// This function can be called with a null (unreachable) insert point.
3354 void EmitAutoVarDecl(const VarDecl &D);
3355
3357 friend class CodeGenFunction;
3358
3359 const VarDecl *Variable;
3360
3361 /// The address of the alloca for languages with explicit address space
3362 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
3363 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
3364 /// as a global constant.
3365 Address Addr;
3366
3367 llvm::Value *NRVOFlag;
3368
3369 /// True if the variable is a __block variable that is captured by an
3370 /// escaping block.
3371 bool IsEscapingByRef;
3372
3373 /// True if the variable is of aggregate type and has a constant
3374 /// initializer.
3375 bool IsConstantAggregate;
3376
3377 /// Non-null if we should use lifetime annotations.
3378 llvm::Value *SizeForLifetimeMarkers;
3379
3380 /// Address with original alloca instruction. Invalid if the variable was
3381 /// emitted as a global constant.
3382 RawAddress AllocaAddr;
3383
3384 struct Invalid {};
3385 AutoVarEmission(Invalid)
3386 : Variable(nullptr), Addr(Address::invalid()),
3387 AllocaAddr(RawAddress::invalid()) {}
3388
3389 AutoVarEmission(const VarDecl &variable)
3390 : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
3391 IsEscapingByRef(false), IsConstantAggregate(false),
3392 SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
3393
3394 bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
3395
3396 public:
3397 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
3398
3399 bool useLifetimeMarkers() const {
3400 return SizeForLifetimeMarkers != nullptr;
3401 }
3402 llvm::Value *getSizeForLifetimeMarkers() const {
3403 assert(useLifetimeMarkers());
3404 return SizeForLifetimeMarkers;
3405 }
3406
3407 /// Returns the raw, allocated address, which is not necessarily
3408 /// the address of the object itself. It is casted to default
3409 /// address space for address space agnostic languages.
3411 return Addr;
3412 }
3413
3414 /// Returns the address for the original alloca instruction.
3415 RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
3416
3417 /// Returns the address of the object within this declaration.
3418 /// Note that this does not chase the forwarding pointer for
3419 /// __block decls.
3421 if (!IsEscapingByRef) return Addr;
3422
3423 return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
3424 }
3425 };
3427 void EmitAutoVarInit(const AutoVarEmission &emission);
3430 QualType::DestructionKind dtorKind);
3431
3432 /// Emits the alloca and debug information for the size expressions for each
3433 /// dimension of an array. It registers the association of its (1-dimensional)
3434 /// QualTypes and size expression's debug node, so that CGDebugInfo can
3435 /// reference this node when creating the DISubrange object to describe the
3436 /// array types.
3438 const VarDecl &D,
3439 bool EmitDebugInfo);
3440
3442 llvm::GlobalValue::LinkageTypes Linkage);
3443
3445 union {
3447 llvm::Value *Value;
3448 };
3449
3450 bool IsIndirect;
3451
3452 ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
3453 ParamValue(Address A) : Addr(A), IsIndirect(true) {}
3454
3455 public:
3456 static ParamValue forDirect(llvm::Value *value) {
3457 return ParamValue(value);
3458 }
3460 assert(!addr.getAlignment().isZero());
3461 return ParamValue(addr);
3462 }
3463
3464 bool isIndirect() const { return IsIndirect; }
3465 llvm::Value *getAnyValue() const {
3466 if (!isIndirect())
3467 return Value;
3468 assert(!Addr.hasOffset() && "unexpected offset");
3469 return Addr.getBasePointer();
3470 }
3471
3472 llvm::Value *getDirectValue() const {
3473 assert(!isIndirect());
3474 return Value;
3475 }
3476
3478 assert(isIndirect());
3479 return Addr;
3480 }
3481 };
3482
3483 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
3484 void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
3485
3486 /// protectFromPeepholes - Protect a value that we're intending to
3487 /// store to the side, but which will probably be used later, from
3488 /// aggressive peepholing optimizations that might delete it.
3489 ///
3490 /// Pass the result to unprotectFromPeepholes to declare that
3491 /// protection is no longer required.
3492 ///
3493 /// There's no particular reason why this shouldn't apply to
3494 /// l-values, it's just that no existing peepholes work on pointers.
3497
3498 void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
3499 SourceLocation Loc,
3500 SourceLocation AssumptionLoc,
3501 llvm::Value *Alignment,
3502 llvm::Value *OffsetValue,
3503 llvm::Value *TheCheck,
3504 llvm::Instruction *Assumption);
3505
3506 void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
3507 SourceLocation Loc, SourceLocation AssumptionLoc,
3508 llvm::Value *Alignment,
3509 llvm::Value *OffsetValue = nullptr);
3510
3511 void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
3512 SourceLocation AssumptionLoc,
3513 llvm::Value *Alignment,
3514 llvm::Value *OffsetValue = nullptr);
3515
3516 //===--------------------------------------------------------------------===//
3517 // Statement Emission
3518 //===--------------------------------------------------------------------===//
3519
3520 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
3521 void EmitStopPoint(const Stmt *S);
3522
3523 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
3524 /// this function even if there is no current insertion point.
3525 ///
3526 /// This function may clear the current insertion point; callers should use
3527 /// EnsureInsertPoint if they wish to subsequently generate code without first
3528 /// calling EmitBlock, EmitBranch, or EmitStmt.
3529 void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = std::nullopt);
3530
3531 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
3532 /// necessarily require an insertion point or debug information; typically
3533 /// because the statement amounts to a jump or a container of other
3534 /// statements.
3535 ///
3536 /// \return True if the statement was handled.
3538
3539 Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
3542 bool GetLast = false,
3543 AggValueSlot AVS =
3545
3546 /// EmitLabel - Emit the block for the given label. It is legal to call this
3547 /// function even if there is no current insertion point.
3548 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
3549
3550 void EmitLabelStmt(const LabelStmt &S);
3552 void EmitGotoStmt(const GotoStmt &S);
3554 void EmitIfStmt(const IfStmt &S);
3555
3557 ArrayRef<const Attr *> Attrs = std::nullopt);
3558 void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = std::nullopt);
3559 void EmitForStmt(const ForStmt &S,
3560 ArrayRef<const Attr *> Attrs = std::nullopt);
3562 void EmitDeclStmt(const DeclStmt &S);
3563 void EmitBreakStmt(const BreakStmt &S);
3569 void EmitAsmStmt(const AsmStmt &S);
3570
3576
3581 bool ignoreResult = false);
3585 bool ignoreResult = false);
3587 RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
3588
3589 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3590 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3591
3597 void VolatilizeTryBlocks(llvm::BasicBlock *BB,
3599
3601 llvm::Function *FinallyFunc);
3603 const Stmt *OutlinedStmt);
3604
3606 const SEHExceptStmt &Except);
3607
3609 const SEHFinallyStmt &Finally);
3610
3612 llvm::Value *ParentFP,
3613 llvm::Value *EntryEBP);
3614 llvm::Value *EmitSEHExceptionCode();
3615 llvm::Value *EmitSEHExceptionInfo();
3617
3618 /// Emit simple code for OpenMP directives in Simd-only mode.
3620
3621 /// Scan the outlined statement for captures from the parent function. For
3622 /// each capture, mark the capture as escaped and emit a call to
3623 /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
3625 bool IsFilter);
3626
3627 /// Recovers the address of a local in a parent function. ParentVar is the
3628 /// address of the variable used in the immediate parent function. It can
3629 /// either be an alloca or a call to llvm.localrecover if there are nested
3630 /// outlined functions. ParentFP is the frame pointer of the outermost parent
3631 /// frame.
3633 Address ParentVar,
3634 llvm::Value *ParentFP);
3635
3637 ArrayRef<const Attr *> Attrs = std::nullopt);
3638
3639 /// Controls insertion of cancellation exit blocks in worksharing constructs.
3641 CodeGenFunction &CGF;
3642
3643 public:
3645 bool HasCancel)
3646 : CGF(CGF) {
3647 CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
3648 }
3649 ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
3650 };
3651
3652 /// Returns calculated size of the specified type.
3653 llvm::Value *getTypeSize(QualType Ty);
3659 SourceLocation Loc);
3661 SmallVectorImpl<llvm::Value *> &CapturedVars);
3662 void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
3663 SourceLocation Loc);
3664 /// Perform element by element copying of arrays with type \a
3665 /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
3666 /// generated by \a CopyGen.
3667 ///
3668 /// \param DestAddr Address of the destination array.
3669 /// \param SrcAddr Address of the source array.
3670 /// \param OriginalType Type of destination and source arrays.
3671 /// \param CopyGen Copying procedure that copies value of single array element
3672 /// to another single array element.
3674 Address DestAddr, Address SrcAddr, QualType OriginalType,
3675 const llvm::function_ref<void(Address, Address)> CopyGen);
3676 /// Emit proper copying of data from one variable to another.
3677 ///
3678 /// \param OriginalType Original type of the copied variables.
3679 /// \param DestAddr Destination address.
3680 /// \param SrcAddr Source address.
3681 /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
3682 /// type of the base array element).
3683 /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
3684 /// the base array element).
3685 /// \param Copy Actual copying expression for copying data from \a SrcVD to \a
3686 /// DestVD.
3687 void EmitOMPCopy(QualType OriginalType,
3688 Address DestAddr, Address SrcAddr,
3689 const VarDecl *DestVD, const VarDecl *SrcVD,
3690 const Expr *Copy);
3691 /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
3692 /// \a X = \a E \a BO \a E.
3693 ///
3694 /// \param X Value to be updated.
3695 /// \param E Update value.
3696 /// \param BO Binary operation for update operation.
3697 /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3698 /// expression, false otherwise.
3699 /// \param AO Atomic ordering of the generated atomic instructions.
3700 /// \param CommonGen Code generator for complex expressions that cannot be
3701 /// expressed through atomicrmw instruction.
3702 /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
3703 /// generated, <false, RValue::get(nullptr)> otherwise.
3704 std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3705 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3706 llvm::AtomicOrdering AO, SourceLocation Loc,
3707 const llvm::function_ref<RValue(RValue)> CommonGen);
3709 OMPPrivateScope &PrivateScope);
3711 OMPPrivateScope &PrivateScope);
3713 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
3714 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3715 CaptureDeviceAddrMap);
3717 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
3718 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3719 CaptureDeviceAddrMap);
3720 /// Emit code for copyin clause in \a D directive. The next code is
3721 /// generated at the start of outlined functions for directives:
3722 /// \code
3723 /// threadprivate_var1 = master_threadprivate_var1;
3724 /// operator=(threadprivate_var2, master_threadprivate_var2);
3725 /// ...
3726 /// __kmpc_barrier(&loc, global_tid);
3727 /// \endcode
3728 ///
3729 /// \param D OpenMP directive possibly with 'copyin' clause(s).
3730 /// \returns true if at least one copyin variable is found, false otherwise.
3732 /// Emit initial code for lastprivate variables. If some variable is
3733 /// not also firstprivate, then the default initialization is used. Otherwise
3734 /// initialization of this variable is performed by EmitOMPFirstprivateClause
3735 /// method.
3736 ///
3737 /// \param D Directive that may have 'lastprivate' directives.
3738 /// \param PrivateScope Private scope for capturing lastprivate variables for
3739 /// proper codegen in internal captured statement.
3740 ///
3741 /// \returns true if there is at least one lastprivate variable, false
3742 /// otherwise.
3744 OMPPrivateScope &PrivateScope);
3745 /// Emit final copying of lastprivate values to original variables at
3746 /// the end of the worksharing or simd directive.
3747 ///
3748 /// \param D Directive that has at least one 'lastprivate' directives.
3749 /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3750 /// it is the last iteration of the loop code in associated directive, or to
3751 /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3753 bool NoFinals,
3754 llvm::Value *IsLastIterCond = nullptr);
3755 /// Emit initial code for linear clauses.
3757 CodeGenFunction::OMPPrivateScope &PrivateScope);
3758 /// Emit final code for linear clauses.
3759 /// \param CondGen Optional conditional code for final part of codegen for
3760 /// linear clause.
3762 const OMPLoopDirective &D,
3763 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3764 /// Emit initial code for reduction variables. Creates reduction copies
3765 /// and initializes them with the values according to OpenMP standard.
3766 ///
3767 /// \param D Directive (possibly) with the 'reduction' clause.
3768 /// \param PrivateScope Private scope for capturing reduction variables for
3769 /// proper codegen in internal captured statement.
3770 ///
3772 OMPPrivateScope &PrivateScope,
3773 bool ForInscan = false);
3774 /// Emit final update of reduction values to original variables at
3775 /// the end of the directive.
3776 ///
3777 /// \param D Directive that has at least one 'reduction' directives.
3778 /// \param ReductionKind The kind of reduction to perform.
3780 const OpenMPDirectiveKind ReductionKind);
3781 /// Emit initial code for linear variables. Creates private copies
3782 /// and initializes them with the values according to OpenMP standard.
3783 ///
3784 /// \param D Directive (possibly) with the 'linear' clause.
3785 /// \return true if at least one linear variable is found that should be
3786 /// initialized with the value of the original variable, false otherwise.
3788
3789 typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3790 llvm::Function * /*OutlinedFn*/,
3791 const OMPTaskDataTy & /*Data*/)>
3794 const OpenMPDirectiveKind CapturedRegion,
3795 const RegionCodeGenTy &BodyGen,
3796 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3803 explicit OMPTargetDataInfo() = default;
3806 unsigned NumberOfTargetItems)
3810 };
3812 const RegionCodeGenTy &BodyGen,
3813 OMPTargetDataInfo &InputInfo);
3816 CodeGenFunction &CGF,
3817 const CapturedStmt *CS,
3853 void
3856 void
3863 void
3879 void
3903
3904 /// Emit device code for the target directive.
3906 StringRef ParentName,
3907 const OMPTargetDirective &S);
3908 static void
3911 /// Emit device code for the target parallel for directive.
3913 CodeGenModule &CGM, StringRef ParentName,
3915 /// Emit device code for the target parallel for simd directive.
3917 CodeGenModule &CGM, StringRef ParentName,
3919 /// Emit device code for the target teams directive.
3920 static void
3922 const OMPTargetTeamsDirective &S);
3923 /// Emit device code for the target teams distribute directive.
3925 CodeGenModule &CGM, StringRef ParentName,
3927 /// Emit device code for the target teams distribute simd directive.
3929 CodeGenModule &CGM, StringRef ParentName,
3931 /// Emit device code for the target simd directive.
3933 StringRef ParentName,
3934 const OMPTargetSimdDirective &S);
3935 /// Emit device code for the target teams distribute parallel for simd
3936 /// directive.
3938 CodeGenModule &CGM, StringRef ParentName,
3940
3941 /// Emit device code for the target teams loop directive.
3943 CodeGenModule &CGM, StringRef ParentName,
3945
3946 /// Emit device code for the target parallel loop directive.
3948 CodeGenModule &CGM, StringRef ParentName,
3950
3952 CodeGenModule &CGM, StringRef ParentName,
3954
3955 /// Emit the Stmt \p S and return its topmost canonical loop, if any.
/// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
3957 /// future it is meant to be the number of loops expected in the loop nests
3958 /// (usually specified by the "collapse" clause) that are collapsed to a
3959 /// single loop by this function.
3960 llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
3961 int Depth);
3962
3963 /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
3965
3966 /// Emit inner loop of the worksharing/simd construct.
3967 ///
3968 /// \param S Directive, for which the inner loop must be emitted.
3969 /// \param RequiresCleanup true, if directive has some associated private
3970 /// variables.
/// \param LoopCond Boolean condition for loop continuation.
3972 /// \param IncExpr Increment expression for loop control variable.
3973 /// \param BodyGen Generator for the inner body of the inner loop.
/// \param PostIncGen Generator for post-increment code (required for ordered
/// loop directives).
3977 const OMPExecutableDirective &S, bool RequiresCleanup,
3978 const Expr *LoopCond, const Expr *IncExpr,
3979 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
3980 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
3981
3983 /// Emit initial code for loop counters of loop-based directives.
3985 OMPPrivateScope &LoopScope);
3986
3987 /// Helper for the OpenMP loop directives.
3989
3990 /// Emit code for the worksharing loop-based directive.
3991 /// \return true, if this construct has any lastprivate clause, false -
3992 /// otherwise.
3994 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3995 const CodeGenDispatchBoundsTy &CGDispatchBounds);
3996
3997 /// Emit code for the distribute loop-based directive.
3999 const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
4000
4001 /// Helpers for the OpenMP loop directives.
4004 const OMPLoopDirective &D,
4005 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
4006
4007 /// Emits the lvalue for the expression with possibly captured variable.
4009
4010private:
4011 /// Helpers for blocks.
4012 llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
4013
4014 /// struct with the values to be passed to the OpenMP loop-related functions
4015 struct OMPLoopArguments {
4016 /// loop lower bound
4018 /// loop upper bound
4020 /// loop stride
4022 /// isLastIteration argument for runtime functions
4024 /// Chunk value generated by sema
4025 llvm::Value *Chunk = nullptr;
4026 /// EnsureUpperBound
4027 Expr *EUB = nullptr;
4028 /// IncrementExpression
4029 Expr *IncExpr = nullptr;
4030 /// Loop initialization
4031 Expr *Init = nullptr;
4032 /// Loop exit condition
4033 Expr *Cond = nullptr;
4034 /// Update of LB after a whole chunk has been executed
4035 Expr *NextLB = nullptr;
4036 /// Update of UB after a whole chunk has been executed
4037 Expr *NextUB = nullptr;
4038 /// Distinguish between the for distribute and sections
4039 OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
4040 OMPLoopArguments() = default;
4041 OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
4042 llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
4043 Expr *IncExpr = nullptr, Expr *Init = nullptr,
4044 Expr *Cond = nullptr, Expr *NextLB = nullptr,
4045 Expr *NextUB = nullptr)
4046 : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
4047 IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
4048 NextUB(NextUB) {}
4049 };
4050 void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
4051 const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
4052 const OMPLoopArguments &LoopArgs,
4053 const CodeGenLoopTy &CodeGenLoop,
4054 const CodeGenOrderedTy &CodeGenOrdered);
4055 void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
4056 bool IsMonotonic, const OMPLoopDirective &S,
4057 OMPPrivateScope &LoopScope, bool Ordered,
4058 const OMPLoopArguments &LoopArgs,
4059 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4060 void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
4061 const OMPLoopDirective &S,
4062 OMPPrivateScope &LoopScope,
4063 const OMPLoopArguments &LoopArgs,
4064 const CodeGenLoopTy &CodeGenLoopContent);
4065 /// Emit code for sections directive.
4066 void EmitSections(const OMPExecutableDirective &S);
4067
4068public:
4069 //===--------------------------------------------------------------------===//
4070 // OpenACC Emission
4071 //===--------------------------------------------------------------------===//
4073 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4074 // simply emitting its structured block, but in the future we will implement
4075 // some sort of IR.
4076 EmitStmt(S.getStructuredBlock());
4077 }
4078
4079 //===--------------------------------------------------------------------===//
4080 // LValue Expression Emission
4081 //===--------------------------------------------------------------------===//
4082
4083 /// Create a check that a scalar RValue is non-null.
4085
4086 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
4088
4089 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
4090 /// and issue an ErrorUnsupported style diagnostic (using the
4091 /// provided Name).
4093 const char *Name);
4094
4095 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
4096 /// an ErrorUnsupported style diagnostic (using the provided Name).
4098 const char *Name);
4099
4100 /// EmitLValue - Emit code to compute a designator that specifies the location
4101 /// of the expression.
4102 ///
4103 /// This can return one of two things: a simple address or a bitfield
4104 /// reference. In either case, the LLVM Value* in the LValue structure is
4105 /// guaranteed to be an LLVM pointer type.
4106 ///
4107 /// If this returns a bitfield reference, nothing about the pointee type of
4108 /// the LLVM value is known: For example, it may not be a pointer to an
4109 /// integer.
4110 ///
4111 /// If this returns a normal address, and if the lvalue's C type is fixed
4112 /// size, this method guarantees that the returned pointer type will point to
4113 /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
4114 /// variable length type, this is not possible.
4115 ///
4117 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
4118
4119private:
4120 LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);
4121
4122public:
4123 /// Same as EmitLValue but additionally we generate checking code to
4124 /// guard against undefined behavior. This is only suitable when we know
4125 /// that the address will be used to access the object.
4127
4129 SourceLocation Loc);
4130
4131 void EmitAtomicInit(Expr *E, LValue lvalue);
4132
4134
4137
4139 llvm::AtomicOrdering AO, bool IsVolatile = false,
4141
4142 void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
4143
4144 void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
4145 bool IsVolatile, bool isInit);
4146
4147 std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
4148 LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
4149 llvm::AtomicOrdering Success =
4150 llvm::AtomicOrdering::SequentiallyConsistent,
4151 llvm::AtomicOrdering Failure =
4152 llvm::AtomicOrdering::SequentiallyConsistent,
4153 bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
4154
4155 void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
4156 const llvm::function_ref<RValue(RValue)> &UpdateOp,
4157 bool IsVolatile);
4158
4159 /// EmitToMemory - Change a scalar value from its value
4160 /// representation to its in-memory representation.
4161 llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
4162
4163 /// EmitFromMemory - Change a scalar value from its memory
4164 /// representation to its value representation.
4165 llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
4166
4167 /// Check if the scalar \p Value is within the valid range for the given
4168 /// type \p Ty.
4169 ///
4170 /// Returns true if a check is needed (even if the range is unknown).
4171 bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
4172 SourceLocation Loc);
4173
4174 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4175 /// care to appropriately convert from the memory representation to
4176 /// the LLVM value representation.
4177 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4178 SourceLocation Loc,
4180