CodeGenFunction.h
1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the internal per-function state used for llvm translation.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15
16#include "CGBuilder.h"
17#include "CGDebugInfo.h"
18#include "CGLoopInfo.h"
19#include "CGValue.h"
20#include "CodeGenModule.h"
21#include "CodeGenPGO.h"
22#include "EHScopeStack.h"
23#include "VarBypassDetector.h"
24#include "clang/AST/CharUnits.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
31#include "clang/AST/Type.h"
32#include "clang/Basic/ABI.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
39#include "llvm/ADT/MapVector.h"
40#include "llvm/ADT/SmallVector.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/ValueHandle.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Transforms/Utils/SanitizerStats.h"
46#include <optional>
47
48namespace llvm {
49class BasicBlock;
50class LLVMContext;
51class MDNode;
52class SwitchInst;
53class Twine;
54class Value;
55class CanonicalLoopInfo;
56}
57
58namespace clang {
59class ASTContext;
60class CXXDestructorDecl;
61class CXXForRangeStmt;
62class CXXTryStmt;
63class Decl;
64class LabelDecl;
65class FunctionDecl;
66class FunctionProtoType;
67class LabelStmt;
68class ObjCContainerDecl;
69class ObjCInterfaceDecl;
70class ObjCIvarDecl;
71class ObjCMethodDecl;
72class ObjCImplementationDecl;
73class ObjCPropertyImplDecl;
74class TargetInfo;
75class VarDecl;
76class ObjCForCollectionStmt;
77class ObjCAtTryStmt;
78class ObjCAtThrowStmt;
79class ObjCAtSynchronizedStmt;
80class ObjCAutoreleasePoolStmt;
81class OMPUseDevicePtrClause;
82class OMPUseDeviceAddrClause;
83class SVETypeFlags;
84class OMPExecutableDirective;
85
86namespace analyze_os_log {
87class OSLogBufferLayout;
88}
89
90namespace CodeGen {
91class CodeGenTypes;
92class CGCallee;
93class CGFunctionInfo;
94class CGBlockInfo;
95class CGCXXABI;
96class BlockByrefHelpers;
97class BlockByrefInfo;
98class BlockFieldFlags;
99class RegionCodeGenTy;
100class TargetCodeGenInfo;
101struct OMPTaskDataTy;
102struct CGCoroData;
103
104/// The kind of evaluation to perform on values of a particular
105/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
106/// CGExprAgg?
107///
108/// TODO: should vectors maybe be split out into their own thing?
109enum TypeEvaluationKind {
110  TEK_Scalar,
111  TEK_Complex,
112  TEK_Aggregate
113};
114
115#define LIST_SANITIZER_CHECKS \
116 SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
117 SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
118 SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
119 SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
120 SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
121 SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
122 SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
123 SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
124 SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
125 SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
126 SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
127 SANITIZER_CHECK(MissingReturn, missing_return, 0) \
128 SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
129 SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
130 SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
131 SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
132 SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
133 SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
134 SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
135 SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
136 SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
137 SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
138 SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
139 SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
140 SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0) \
141 SANITIZER_CHECK(BoundsSafety, bounds_safety, 0)
142
143enum SanitizerHandler {
144#define SANITIZER_CHECK(Enum, Name, Version) Enum,
145 LIST_SANITIZER_CHECKS
146#undef SANITIZER_CHECK
147};
148
149/// Helper class with most of the code for saving a value for a
150/// conditional expression cleanup.
151struct DominatingLLVMValue {
152 typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
153
154 /// Answer whether the given value needs extra work to be saved.
155 static bool needsSaving(llvm::Value *value) {
156 if (!value)
157 return false;
158
159 // If it's not an instruction, we don't need to save.
160 if (!isa<llvm::Instruction>(value)) return false;
161
162 // If it's an instruction in the entry block, we don't need to save.
163 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
164 return (block != &block->getParent()->getEntryBlock());
165 }
166
167 static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
168 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
169};
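// Illustrative sketch (editorial, not part of the original header): the
// save/restore pair above is what conditional cleanups rely on to reuse a
// value in a block that the value's definition may not dominate. Assuming
// `CGF` is the current CodeGenFunction and `V` is an llvm::Value*:
//
//   DominatingLLVMValue::saved_type Saved = DominatingLLVMValue::save(CGF, V);
//   // ... later, possibly in a cleanup emitted in another basic block ...
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);
//
// save() spills only when needsSaving() is true; otherwise it simply wraps V.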
170
171/// A partial specialization of DominatingValue for llvm::Values that
172/// might be llvm::Instructions.
173template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
174 typedef T *type;
175 static type restore(CodeGenFunction &CGF, saved_type value) {
176 return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
177 }
178};
179
180/// A specialization of DominatingValue for Address.
181template <> struct DominatingValue<Address> {
182 typedef Address type;
183
184 struct saved_type {
185 DominatingLLVMValue::saved_type BasePtr;
186 llvm::Type *ElementType;
187 CharUnits Alignment;
188 DominatingLLVMValue::saved_type Offset;
189 llvm::PointerType *EffectiveType;
190 };
191
192 static bool needsSaving(type value) {
193 if (DominatingLLVMValue::needsSaving(value.getBasePointer()) ||
194 DominatingLLVMValue::needsSaving(value.getOffset()))
195 return true;
196 return false;
197 }
198 static saved_type save(CodeGenFunction &CGF, type value) {
199 return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
200 value.getElementType(), value.getAlignment(),
201 DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
202 }
203 static type restore(CodeGenFunction &CGF, saved_type value) {
204 return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
205 value.ElementType, value.Alignment, CGPointerAuthInfo(),
206 DominatingLLVMValue::restore(CGF, value.Offset));
207 }
208};
209
210/// A specialization of DominatingValue for RValue.
211template <> struct DominatingValue<RValue> {
212 typedef RValue type;
213 class saved_type {
214 enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
215 AggregateAddress, ComplexAddress };
216 union {
217 struct {
218 DominatingLLVMValue::saved_type first, second;
219 } Vals;
220 DominatingValue<Address>::saved_type AggregateAddr;
221 };
222 LLVM_PREFERRED_TYPE(Kind)
223 unsigned K : 3;
224
225 saved_type(DominatingLLVMValue::saved_type Val1, unsigned K)
226 : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}
227
228 saved_type(DominatingLLVMValue::saved_type Val1,
229 DominatingLLVMValue::saved_type Val2)
230 : Vals{Val1, Val2}, K(ComplexAddress) {}
231
232 saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
233 : AggregateAddr(AggregateAddr), K(K) {}
234
235 public:
236 static bool needsSaving(RValue value);
237 static saved_type save(CodeGenFunction &CGF, RValue value);
238 RValue restore(CodeGenFunction &CGF);
239
240 // implementations in CGCleanup.cpp
241 };
242
243 static bool needsSaving(type value) {
244 return saved_type::needsSaving(value);
245 }
246 static saved_type save(CodeGenFunction &CGF, type value) {
247 return saved_type::save(CGF, value);
248 }
249 static type restore(CodeGenFunction &CGF, saved_type value) {
250 return value.restore(CGF);
251 }
252};
253
254/// CodeGenFunction - This class organizes the per-function state that is used
255/// while generating LLVM code.
256class CodeGenFunction : public CodeGenTypeCache {
257 CodeGenFunction(const CodeGenFunction &) = delete;
258 void operator=(const CodeGenFunction &) = delete;
259
260 friend class CGCXXABI;
261public:
262 /// A jump destination is an abstract label, branching to which may
263 /// require a jump out through normal cleanups.
264 struct JumpDest {
265 JumpDest() : Block(nullptr), Index(0) {}
266 JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
267 unsigned Index)
268 : Block(Block), ScopeDepth(Depth), Index(Index) {}
269
270 bool isValid() const { return Block != nullptr; }
271 llvm::BasicBlock *getBlock() const { return Block; }
272 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
273 unsigned getDestIndex() const { return Index; }
274
275 // This should be used cautiously.
277 ScopeDepth = depth;
278 }
279
280 private:
281 llvm::BasicBlock *Block;
283 unsigned Index;
284 };
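  // Illustrative sketch (editorial): break/continue and goto handling create a
  // JumpDest up front and later branch to it through any intervening cleanups.
  // Assuming `CGF` is the current CodeGenFunction emitting a loop:
  //
  //   CodeGenFunction::JumpDest LoopExit =
  //       CGF.getJumpDestInCurrentScope("for.end");
  //   // ... emit the body; a `break` lowers to ...
  //   CGF.EmitBranchThroughCleanup(LoopExit);  // run cleanups, then jump
  //   // ... after the loop ...
  //   CGF.EmitBlock(LoopExit.getBlock());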
285
286 CodeGenModule &CGM; // Per-module state.
288
289 // For EH/SEH outlined funclets, this field points to parent's CGF
291
292 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
295
296 // Stores variables for which we can't generate correct lifetime markers
297 // because of jumps.
299
300 /// List of recently emitted OMPCanonicalLoops.
301 ///
302 /// Since OMPCanonicalLoops are nested inside other statements (in particular
303 /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
304 /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
305 /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
306 /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
307 /// this stack when done. Entering a new loop requires clearing this list; it
308 /// either means we start parsing a new loop nest (in which case the previous
309 /// loop nest goes out of scope) or a second loop at the same level, in which
310 /// case it would be ambiguous into which of the two (or more) loops the loop
311 /// nest would extend.
313
314 /// Stack to track the Logical Operator recursion nest for MC/DC.
316
317 /// Stack to track the controlled convergence tokens.
319
320 /// Number of nested loops to be consumed by the last surrounding
321 /// loop-associated directive.
323
324 // CodeGen lambda for loops and support for ordered clause
325 typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
326 JumpDest)>
328 typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
329 const unsigned, const bool)>
331
332 // Codegen lambda for loop bounds in worksharing loop constructs
333 typedef llvm::function_ref<std::pair<LValue, LValue>(
336
337 // Codegen lambda for loop bounds in dispatch-based loop implementation
338 typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
340 Address UB)>
342
343 /// CGBuilder insert helper. This function is called after an
344 /// instruction is created using Builder.
345 void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
346 llvm::BasicBlock::iterator InsertPt) const;
347
348 /// CurFuncDecl - Holds the Decl for the current outermost
349 /// non-closure context.
350 const Decl *CurFuncDecl = nullptr;
351 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
352 const Decl *CurCodeDecl = nullptr;
353 const CGFunctionInfo *CurFnInfo = nullptr;
355 llvm::Function *CurFn = nullptr;
356
357 /// Save Parameter Decl for coroutine.
359
360 // Holds coroutine data if the current function is a coroutine. We use a
361 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
362 // in this header.
363 struct CGCoroInfo {
364 std::unique_ptr<CGCoroData> Data;
365 bool InSuspendBlock = false;
366 CGCoroInfo();
367 ~CGCoroInfo();
368 };
370
371 bool isCoroutine() const {
372 return CurCoro.Data != nullptr;
373 }
374
375 bool inSuspendBlock() const {
376 return isCoroutine() && CurCoro.InSuspendBlock;
377 }
378
379 // Holds FramePtr for await_suspend wrapper generation,
380 // so that the __builtin_coro_frame call can be lowered
381 // directly to the value of its second argument.
383 llvm::Value *FramePtr = nullptr;
384 };
386
387 // Generates a wrapper function for the `llvm.coro.await.suspend.*` intrinsics.
388 // It encapsulates SuspendExpr in a function, to separate its body
389 // from the main coroutine and avoid miscompilations. The intrinsic
390 // is lowered to a call to this function in the CoroSplit pass.
391 // Function signature is:
392 // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
393 // where type is one of (void, i1, ptr)
394 llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
395 Twine const &SuspendPointName,
396 CoroutineSuspendExpr const &S);
397
398 /// CurGD - The GlobalDecl for the current function being compiled.
400
401 /// PrologueCleanupDepth - The cleanup depth enclosing all the
402 /// cleanups associated with the parameters.
404
405 /// ReturnBlock - Unified return block.
407
408 /// ReturnValue - The temporary alloca to hold the return
409 /// value. This is invalid iff the function has no return value.
411
412 /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
413 /// This is invalid if sret is not in use.
415
416 /// If a return statement is being visited, this holds the return statement's
417 /// result expression.
418 const Expr *RetExpr = nullptr;
419
420 /// Return true if a label was seen in the current scope.
422 if (CurLexicalScope)
423 return CurLexicalScope->hasLabels();
424 return !LabelMap.empty();
425 }
426
427 /// AllocaInsertPoint - This is an instruction in the entry block before which
428 /// we prefer to insert allocas.
429 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
430
431private:
432 /// PostAllocaInsertPt - This is a place in the prologue where code can be
433 /// inserted that will be dominated by all the static allocas. This helps
434 /// achieve two things:
435 /// 1. Contiguity of all static allocas (within the prologue) is maintained.
436 /// 2. All other prologue code (which is dominated by the static allocas)
437 /// appears in source order immediately after all static allocas.
438 ///
439 /// PostAllocaInsertPt will be lazily created when it is *really* required.
440 llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;
441
442public:
443 /// Return PostAllocaInsertPt. If it is not yet created, then insert it
444 /// immediately after AllocaInsertPt.
445 llvm::Instruction *getPostAllocaInsertPoint() {
446 if (!PostAllocaInsertPt) {
447 assert(AllocaInsertPt &&
448 "Expected static alloca insertion point at function prologue");
449 assert(AllocaInsertPt->getParent()->isEntryBlock() &&
450 "EBB should be entry block of the current code gen function");
451 PostAllocaInsertPt = AllocaInsertPt->clone();
452 PostAllocaInsertPt->setName("postallocapt");
453 PostAllocaInsertPt->insertAfter(AllocaInsertPt);
454 }
455
456 return PostAllocaInsertPt;
457 }
458
459 /// API for captured statement code generation.
461 public:
463 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
466 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
467
469 S.getCapturedRecordDecl()->field_begin();
470 for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
471 E = S.capture_end();
472 I != E; ++I, ++Field) {
473 if (I->capturesThis())
474 CXXThisFieldDecl = *Field;
475 else if (I->capturesVariable())
476 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
477 else if (I->capturesVariableByCopy())
478 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
479 }
480 }
481
482 virtual ~CGCapturedStmtInfo();
483
484 CapturedRegionKind getKind() const { return Kind; }
485
486 virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
487 // Retrieve the value of the context parameter.
488 virtual llvm::Value *getContextValue() const { return ThisValue; }
489
490 /// Lookup the captured field decl for a variable.
491 virtual const FieldDecl *lookup(const VarDecl *VD) const {
492 return CaptureFields.lookup(VD->getCanonicalDecl());
493 }
494
495 bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
496 virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
497
498 static bool classof(const CGCapturedStmtInfo *) {
499 return true;
500 }
501
502 /// Emit the captured statement body.
503 virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
505 CGF.EmitStmt(S);
506 }
507
508 /// Get the name of the capture helper.
509 virtual StringRef getHelperName() const { return "__captured_stmt"; }
510
511 /// Get the CaptureFields
512 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
513 return CaptureFields;
514 }
515
516 private:
517 /// The kind of captured statement being generated.
519
520 /// Keep the map between VarDecl and FieldDecl.
521 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
522
523 /// The base address of the captured record, passed in as the first
524 /// argument of the parallel region function.
525 llvm::Value *ThisValue;
526
527 /// Captured 'this' type.
528 FieldDecl *CXXThisFieldDecl;
529 };
531
532 /// RAII for correct setting/restoring of CapturedStmtInfo.
534 private:
535 CodeGenFunction &CGF;
536 CGCapturedStmtInfo *PrevCapturedStmtInfo;
537 public:
539 CGCapturedStmtInfo *NewCapturedStmtInfo)
540 : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
541 CGF.CapturedStmtInfo = NewCapturedStmtInfo;
542 }
543 ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
544 };
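  // Illustrative usage (editorial): emitting a captured statement body under a
  // temporary capture mapping. `CS` is assumed to be a CapturedStmt*, and the
  // CGCapturedStmtInfo constructor's region kind is left at its default:
  //
  //   CodeGenFunction::CGCapturedStmtInfo CSInfo(*CS);
  //   CodeGenFunction::CGCapturedStmtRAII Raii(CGF, &CSInfo);
  //   CGF.EmitStmt(CS->getCapturedStmt());  // lookups now hit the capture map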
545
546 /// An abstract representation of regular/ObjC call/message targets.
548 /// The function declaration of the callee.
549 const Decl *CalleeDecl;
550
551 public:
552 AbstractCallee() : CalleeDecl(nullptr) {}
553 AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
554 AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
555 bool hasFunctionDecl() const {
556 return isa_and_nonnull<FunctionDecl>(CalleeDecl);
557 }
558 const Decl *getDecl() const { return CalleeDecl; }
559 unsigned getNumParams() const {
560 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
561 return FD->getNumParams();
562 return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
563 }
564 const ParmVarDecl *getParamDecl(unsigned I) const {
565 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
566 return FD->getParamDecl(I);
567 return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
568 }
569 };
570
571 /// Sanitizers enabled for this function.
573
574 /// True if CodeGen currently emits code implementing sanitizer checks.
575 bool IsSanitizerScope = false;
576
577 /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
579 CodeGenFunction *CGF;
580 public:
583 };
584
585 /// In C++, whether we are code generating a thunk. This controls whether we
586 /// should emit cleanups.
587 bool CurFuncIsThunk = false;
588
589 /// In ARC, whether we should autorelease the return value.
590 bool AutoreleaseResult = false;
591
592 /// Whether we processed a Microsoft-style asm block during CodeGen. These can
593 /// potentially set the return value.
594 bool SawAsmBlock = false;
595
597
598 /// True if the current function is an outlined SEH helper. This can be a
599 /// finally block or filter expression.
601
602 /// True if CodeGen currently emits code inside a preserved access index
603 /// region.
605
606 /// True if the current statement has nomerge attribute.
608
609 /// True if the current statement has noinline attribute.
611
612 /// True if the current statement has always_inline attribute.
614
615 /// True if the current statement has noconvergent attribute.
617
618 /// HLSL Branch attribute.
619 HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr =
620 HLSLControlFlowHintAttr::SpellingNotCalculated;
621
622 // The CallExpr within the current statement that the musttail attribute
623 // applies to. nullptr if there is no 'musttail' on the current statement.
624 const CallExpr *MustTailCall = nullptr;
625
626 /// Returns true if a function must make progress, which means the
627 /// mustprogress attribute can be added.
629 if (CGM.getCodeGenOpts().getFiniteLoops() ==
631 return false;
632
633 // C++11 and later guarantees that a thread eventually will do one of the
634 // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
635 // - terminate,
636 // - make a call to a library I/O function,
637 // - perform an access through a volatile glvalue, or
638 // - perform a synchronization operation or an atomic operation.
639 //
640 // Hence each function is 'mustprogress' in C++11 or later.
641 return getLangOpts().CPlusPlus11;
642 }
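  // Editorial example: in C++11 and later the guarantee above lets the backend
  // treat a side-effect-free infinite loop as removable, which is exactly what
  // the 'mustprogress' attribute conveys:
  //
  //   void spin() {
  //     while (true) { }  // no termination, I/O, volatile access, or atomics
  //   }                   // may be assumed unreachable under mustprogress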
643
644 /// Returns true if a loop must make progress, which means the mustprogress
645 /// attribute can be added. \p HasEmptyBody indicates whether the loop body
646 /// is known to be empty.
647 bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);
648
650 llvm::Value *BlockPointer = nullptr;
651
652 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
654
655 /// A mapping from NRVO variables to the flags used to indicate
656 /// when the NRVO has been applied to this variable.
657 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
658
661
662 // A stack of cleanups which were added to EHStack but have to be deactivated
663 // later before being popped or emitted. These are usually deactivated on
664 // exiting a `CleanupDeactivationScope` scope. For instance, after a
665 // full-expr.
666 //
667 // These are especially useful for correctly emitting cleanups while
668 // encountering branches out of an expression (through stmt-exprs or coroutine
669 // suspensions).
672 llvm::Instruction *DominatingIP;
673 };
675
676 // Enters a new scope for capturing cleanups which are deferred to be
677 // deactivated, all of which will be deactivated once the scope is exited.
686
688 assert(!Deactivated && "Deactivating already deactivated scope");
690 for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
691 CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
692 Stack[I - 1].DominatingIP);
693 Stack[I - 1].DominatingIP->eraseFromParent();
694 }
695 Stack.resize(OldDeactivateCleanupStackSize);
696 Deactivated = true;
697 }
698
700 if (Deactivated)
701 return;
703 }
704 };
705
707
708 llvm::Instruction *CurrentFuncletPad = nullptr;
709
711 bool isRedundantBeforeReturn() override { return true; }
712
713 llvm::Value *Addr;
714 llvm::Value *Size;
715
716 public:
717 CallLifetimeEnd(RawAddress addr, llvm::Value *size)
718 : Addr(addr.getPointer()), Size(size) {}
719
720 void Emit(CodeGenFunction &CGF, Flags flags) override {
721 CGF.EmitLifetimeEnd(Size, Addr);
722 }
723 };
724
725 /// Header for data within LifetimeExtendedCleanupStack.
727 /// The size of the following cleanup object.
728 unsigned Size;
729 /// The kind of cleanup to push.
730 LLVM_PREFERRED_TYPE(CleanupKind)
732 /// Whether this is a conditional cleanup.
733 LLVM_PREFERRED_TYPE(bool)
734 unsigned IsConditional : 1;
735
736 size_t getSize() const { return Size; }
737 CleanupKind getKind() const { return (CleanupKind)Kind; }
738 bool isConditional() const { return IsConditional; }
739 };
740
741 /// i32s containing the indexes of the cleanup destinations.
743
745
746 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
747 llvm::BasicBlock *EHResumeBlock = nullptr;
748
749 /// The exception slot. All landing pads write the current exception pointer
750 /// into this alloca.
751 llvm::Value *ExceptionSlot = nullptr;
752
753 /// The selector slot. Under the MandatoryCleanup model, all landing pads
754 /// write the current selector value into this alloca.
755 llvm::AllocaInst *EHSelectorSlot = nullptr;
756
757 /// A stack of exception code slots. Entering an __except block pushes a slot
758 /// on the stack and leaving pops one. The __exception_code() intrinsic loads
759 /// a value from the top of the stack.
761
762 /// Value returned by __exception_info intrinsic.
763 llvm::Value *SEHInfo = nullptr;
764
765 /// Emits a landing pad for the current EH stack.
766 llvm::BasicBlock *EmitLandingPad();
767
768 llvm::BasicBlock *getInvokeDestImpl();
769
770 /// Parent loop-based directive for scan directive.
772 llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
773 llvm::BasicBlock *OMPAfterScanBlock = nullptr;
774 llvm::BasicBlock *OMPScanExitBlock = nullptr;
775 llvm::BasicBlock *OMPScanDispatch = nullptr;
776 bool OMPFirstScanLoop = false;
777
778 /// Manages parent directive for scan directives.
780 CodeGenFunction &CGF;
781 const OMPExecutableDirective *ParentLoopDirectiveForScan;
782
783 public:
785 CodeGenFunction &CGF,
786 const OMPExecutableDirective &ParentLoopDirectiveForScan)
787 : CGF(CGF),
788 ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
789 CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
790 }
792 CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
793 }
794 };
795
796 template <class T>
798 return DominatingValue<T>::save(*this, value);
799 }
800
802 public:
803 CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
806
807 private:
808 void ConstructorHelper(FPOptions FPFeatures);
809 CodeGenFunction &CGF;
810 FPOptions OldFPFeatures;
811 llvm::fp::ExceptionBehavior OldExcept;
812 llvm::RoundingMode OldRounding;
813 std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
814 };
816
817public:
818 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
819 /// rethrows.
821
822 /// A class controlling the emission of a finally block.
824 /// Where the catchall's edge through the cleanup should go.
825 JumpDest RethrowDest;
826
827 /// A function to call to enter the catch.
828 llvm::FunctionCallee BeginCatchFn;
829
830 /// An i1 variable indicating whether or not the @finally is
831 /// running for an exception.
832 llvm::AllocaInst *ForEHVar = nullptr;
833
834 /// An i8* variable into which the exception pointer to rethrow
835 /// has been saved.
836 llvm::AllocaInst *SavedExnVar = nullptr;
837
838 public:
839 void enter(CodeGenFunction &CGF, const Stmt *Finally,
840 llvm::FunctionCallee beginCatchFn,
841 llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
842 void exit(CodeGenFunction &CGF);
843 };
844
845 /// Returns true inside SEH __try blocks.
846 bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
847
848 /// Returns true while emitting a cleanuppad.
849 bool isCleanupPadScope() const {
850 return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
851 }
852
853 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
854 /// current full-expression. Safe against the possibility that
855 /// we're currently inside a conditionally-evaluated expression.
856 template <class T, class... As>
857 void pushFullExprCleanup(CleanupKind kind, As... A) {
858 // If we're not in a conditional branch, or if none of the
859 // arguments requires saving, then use the unconditional cleanup.
861 return EHStack.pushCleanup<T>(kind, A...);
862
863 // Stash values in a tuple so we can guarantee the order of saves.
864 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
865 SavedTuple Saved{saveValueInCond(A)...};
866
867 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
868 EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
869 initFullExprCleanup();
870 }
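  // Illustrative sketch (editorial): pushing a cleanup that is safe inside a
  // conditionally-evaluated expression. `MyCleanup` is a hypothetical
  // EHScopeStack::Cleanup subclass taking an llvm::Value*, and `ResourcePtr`
  // is assumed to be in scope; llvm::Value* arguments are saved through
  // DominatingValue<llvm::Value*> when needed:
  //
  //   pushFullExprCleanup<MyCleanup>(NormalAndEHCleanup, ResourcePtr);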
871
872 /// Queue a cleanup to be pushed after finishing the current full-expression,
873 /// potentially with an active flag.
874 template <class T, class... As>
877 return pushCleanupAfterFullExprWithActiveFlag<T>(
878 Kind, RawAddress::invalid(), A...);
879
880 RawAddress ActiveFlag = createCleanupActiveFlag();
881 assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
882 "cleanup active flag should never need saving");
883
884 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
885 SavedTuple Saved{saveValueInCond(A)...};
886
887 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
888 pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag, Saved);
889 }
890
891 template <class T, class... As>
893 RawAddress ActiveFlag, As... A) {
894 LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
895 ActiveFlag.isValid()};
896
899 LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
900 (Header.IsConditional ? sizeof(ActiveFlag) : 0));
901
902 static_assert(sizeof(Header) % alignof(T) == 0,
903 "Cleanup will be allocated on misaligned address");
904 char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
905 new (Buffer) LifetimeExtendedCleanupHeader(Header);
906 new (Buffer + sizeof(Header)) T(A...);
907 if (Header.IsConditional)
908 new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
909 }
910
911 // Push a cleanup onto EHStack and deactivate it later. It is usually
912 // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
913 // full expression).
914 template <class T, class... As>
916 // Placeholder dominating IP for this cleanup.
917 llvm::Instruction *DominatingIP =
918 Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
919 EHStack.pushCleanup<T>(Kind, A...);
921 {EHStack.stable_begin(), DominatingIP});
922 }
923
924 /// Set up the last cleanup that was pushed as a conditional
925 /// full-expression cleanup.
928 }
929
932
933 /// PushDestructorCleanup - Push a cleanup to call the
934 /// complete-object destructor of an object of the given type at the
935 /// given address. Does nothing if T is not a C++ class type with a
936 /// non-trivial destructor.
938
939 /// PushDestructorCleanup - Push a cleanup to call the
940 /// complete-object variant of the given destructor on the object at
941 /// the given address.
943 Address Addr);
944
945 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
946 /// process all branch fixups.
947 void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
948 bool ForDeactivation = false);
949
950 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
951 /// The block cannot be reactivated. Pops it if it's the top of the
952 /// stack.
953 ///
954 /// \param DominatingIP - An instruction which is known to
955 /// dominate the current IP (if set) and which lies along
956 /// all paths of execution between the current IP and the
957 /// point at which the cleanup comes into scope.
959 llvm::Instruction *DominatingIP);
960
961 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
962 /// Cannot be used to resurrect a deactivated cleanup.
963 ///
964 /// \param DominatingIP - An instruction which is known to
965 /// dominate the current IP (if set) and which lies along
966 /// all paths of execution between the current IP and the
967 /// point at which the cleanup comes into scope.
969 llvm::Instruction *DominatingIP);
970
971 /// Enters a new scope for capturing cleanups, all of which
972 /// will be executed once the scope is exited.
974 EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
975 size_t LifetimeExtendedCleanupStackSize;
976 CleanupDeactivationScope DeactivateCleanups;
977 bool OldDidCallStackSave;
978 protected:
980 private:
981
982 RunCleanupsScope(const RunCleanupsScope &) = delete;
983 void operator=(const RunCleanupsScope &) = delete;
984
985 protected:
987
988 public:
989 /// Enter a new cleanup scope.
991 : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
992 CleanupStackDepth = CGF.EHStack.stable_begin();
993 LifetimeExtendedCleanupStackSize =
995 OldDidCallStackSave = CGF.DidCallStackSave;
996 CGF.DidCallStackSave = false;
997 OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
998 CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
999 }
1000
1001 /// Exit this cleanup scope, emitting any accumulated cleanups.
1003 if (PerformCleanup)
1004 ForceCleanup();
1005 }
1006
1007 /// Determine whether this scope requires any cleanups.
1008 bool requiresCleanups() const {
1009 return CGF.EHStack.stable_begin() != CleanupStackDepth;
1010 }
1011
1012 /// Force the emission of cleanups now, instead of waiting
1013 /// until this object is destroyed.
1014 /// \param ValuesToReload - A list of values that need to be available at
1015 /// the insertion point after cleanup emission. If cleanup emission created
1016 /// a shared cleanup block, these value pointers will be rewritten.
1017 /// Otherwise, they will not be modified.
1018 void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
1019 assert(PerformCleanup && "Already forced cleanup");
1020 CGF.DidCallStackSave = OldDidCallStackSave;
1021 DeactivateCleanups.ForceDeactivate();
1022 CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
1023 ValuesToReload);
1024 PerformCleanup = false;
1025 CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
1026 }
1027 };
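  // Illustrative usage (editorial), assuming `Body` is a Stmt* being emitted
  // from within a CodeGenFunction member:
  //
  //   {
  //     RunCleanupsScope Scope(*this);
  //     EmitStmt(Body);  // any cleanups pushed here ...
  //   }                  // ... are emitted when Scope is destroyed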
1028
1029 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1032
1034 SourceRange Range;
1036 LexicalScope *ParentScope;
1037
1038 LexicalScope(const LexicalScope &) = delete;
1039 void operator=(const LexicalScope &) = delete;
1040
1041 public:
1042 /// Enter a new cleanup scope.
1044 : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
1045 CGF.CurLexicalScope = this;
1046 if (CGDebugInfo *DI = CGF.getDebugInfo())
1047 DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
1048 }
1049
1050 void addLabel(const LabelDecl *label) {
1051 assert(PerformCleanup && "adding label to dead scope?");
1052 Labels.push_back(label);
1053 }
1054
1055 /// Exit this cleanup scope, emitting any accumulated
1056 /// cleanups.
1058 if (CGDebugInfo *DI = CGF.getDebugInfo())
1059 DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
1060
1061 // If we should perform a cleanup, force them now. Note that
1062 // this ends the cleanup scope before rescoping any labels.
1063 if (PerformCleanup) {
1064 ApplyDebugLocation DL(CGF, Range.getEnd());
1065 ForceCleanup();
1066 }
1067 }
1068
1069 /// Force the emission of cleanups now, instead of waiting
1070 /// until this object is destroyed.
1072 CGF.CurLexicalScope = ParentScope;
1074
1075 if (!Labels.empty())
1076 rescopeLabels();
1077 }
1078
1079 bool hasLabels() const {
1080 return !Labels.empty();
1081 }
1082
1083 void rescopeLabels();
1084 };
1085
1086 typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
1087
1088 /// The class used to temporarily assign some variables new addresses.
1090 DeclMapTy SavedLocals;
1091 DeclMapTy SavedTempAddresses;
1092 OMPMapVars(const OMPMapVars &) = delete;
1093 void operator=(const OMPMapVars &) = delete;
1094
1095 public:
1096 explicit OMPMapVars() = default;
1097 ~OMPMapVars() {
1098 assert(SavedLocals.empty() && "Did not restore original addresses.");
1099 };
1100
1101 /// Sets the address of the variable \p LocalVD to be \p TempAddr in
1102 /// function \p CGF.
1103 /// \return true if at least one variable was set already, false otherwise.
1104 bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
1105 Address TempAddr) {
1106 LocalVD = LocalVD->getCanonicalDecl();
1107 // Only save it once.
1108 if (SavedLocals.count(LocalVD)) return false;
1109
1110 // Copy the existing local entry to SavedLocals.
1111 auto it = CGF.LocalDeclMap.find(LocalVD);
1112 if (it != CGF.LocalDeclMap.end())
1113 SavedLocals.try_emplace(LocalVD, it->second);
1114 else
1115 SavedLocals.try_emplace(LocalVD, Address::invalid());
1116
1117 // Generate the private entry.
1118 QualType VarTy = LocalVD->getType();
1119 if (VarTy->isReferenceType()) {
1120 Address Temp = CGF.CreateMemTemp(VarTy);
1121 CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
1122 TempAddr = Temp;
1123 }
1124 SavedTempAddresses.try_emplace(LocalVD, TempAddr);
1125
1126 return true;
1127 }
1128
1129 /// Applies new addresses to the list of the variables.
1130 /// \return true if at least one variable is using new address, false
1131 /// otherwise.
1133 copyInto(SavedTempAddresses, CGF.LocalDeclMap);
1134 SavedTempAddresses.clear();
1135 return !SavedLocals.empty();
1136 }
1137
1138 /// Restores original addresses of the variables.
1140 if (!SavedLocals.empty()) {
1141 copyInto(SavedLocals, CGF.LocalDeclMap);
1142 SavedLocals.clear();
1143 }
1144 }
1145
1146 private:
1147 /// Copy all the entries in the source map over the corresponding
1148 /// entries in the destination, which must exist.
1149 static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
1150 for (auto &[Decl, Addr] : Src) {
1151 if (!Addr.isValid())
1152 Dest.erase(Decl);
1153 else
1154 Dest.insert_or_assign(Decl, Addr);
1155 }
1156 }
1157 };
1158
1159 /// The scope used to remap some variables as private in the OpenMP loop body
1160 /// (or other captured region emitted without outlining), and to restore old
1161 /// vars back on exit.
1163 OMPMapVars MappedVars;
1164 OMPPrivateScope(const OMPPrivateScope &) = delete;
1165 void operator=(const OMPPrivateScope &) = delete;
1166
1167 public:
1168 /// Enter a new OpenMP private scope.
1170
1171 /// Registers \p LocalVD variable as a private with \p Addr as the address
1172 /// of the corresponding private variable. \p
1173 /// PrivateGen is the address of the generated private variable.
1174 /// \return true if the variable is registered as private, false if it has
1175 /// been privatized already.
1176 bool addPrivate(const VarDecl *LocalVD, Address Addr) {
1177 assert(PerformCleanup && "adding private to dead scope");
1178 return MappedVars.setVarAddr(CGF, LocalVD, Addr);
1179 }
1180
1181 /// Privatizes local variables previously registered as private.
1182 /// Registration is separate from the actual privatization to allow
1183 /// initializers to use values of the original variables, not the private ones.
1184 /// This is important, for example, if the private variable is a class
1185 /// variable initialized by a constructor that references other private
1186 /// variables. But at initialization original variables must be used, not
1187 /// private copies.
1188 /// \return true if at least one variable was privatized, false otherwise.
1189 bool Privatize() { return MappedVars.apply(CGF); }
1190
1193 restoreMap();
1194 }
1195
1196 /// Exit scope - all the mapped variables are restored.
1198 if (PerformCleanup)
1199 ForceCleanup();
1200 }
1201
1202 /// Checks if the global variable is captured in current function.
1203 bool isGlobalVarCaptured(const VarDecl *VD) const {
1204 VD = VD->getCanonicalDecl();
1205 return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
1206 }
1207
1208 /// Restore all mapped variables without cleanup. This is useful when we want
1209 /// to reference the original variables but don't want the cleanup because
1210 /// that could emit lifetime end too early, causing backend issue #56913.
1211 void restoreMap() { MappedVars.restore(CGF); }
1212 };
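  // Illustrative usage (editorial): the usual privatization sequence in OpenMP
  // codegen, assuming `CGF`, a captured VarDecl `VD`, and a freshly created
  // private Address `PrivAddr`:
  //
  //   CodeGenFunction::OMPPrivateScope PrivScope(CGF);
  //   (void)PrivScope.addPrivate(VD, PrivAddr);  // register, don't remap yet
  //   (void)PrivScope.Privatize();               // remap all registered vars
  //   // ... emit the region body using the private copies ...
  //   PrivScope.ForceCleanup();                  // restore original addresses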
1213
1214 /// Save/restore original map of previously emitted local vars in case we
1215 /// need to duplicate emission of the same code several times in the same
1216 /// function for OpenMP code.
1218 CodeGenFunction &CGF;
1219 DeclMapTy SavedMap;
1220
1221 public:
1223 : CGF(CGF), SavedMap(CGF.LocalDeclMap) {}
1224 ~OMPLocalDeclMapRAII() { SavedMap.swap(CGF.LocalDeclMap); }
1225 };
1226
1227 /// Takes the old cleanup stack size and emits the cleanup blocks
1228 /// that have been added.
1229 void
1231 std::initializer_list<llvm::Value **> ValuesToReload = {});
1232
1233 /// Takes the old cleanup stack size and emits the cleanup blocks
1234 /// that have been added, then adds all lifetime-extended cleanups from
1235 /// the given position to the stack.
1236 void
1238 size_t OldLifetimeExtendedStackSize,
1239 std::initializer_list<llvm::Value **> ValuesToReload = {});
1240
1241 void ResolveBranchFixups(llvm::BasicBlock *Target);
1242
1243 /// The given basic block lies in the current EH scope, but may be a
1244 /// target of a potentially scope-crossing jump; get a stable handle
1245 /// to which we can perform this jump later.
1247 return JumpDest(Target,
1250 }
1251
1252 /// The given basic block lies in the current EH scope, but may be a
1253 /// target of a potentially scope-crossing jump; get a stable handle
1254 /// to which we can perform this jump later.
1255 JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
1256 return getJumpDestInCurrentScope(createBasicBlock(Name));
1257 }
1258
1259 /// EmitBranchThroughCleanup - Emit a branch from the current insert
1260 /// block through the normal cleanup handling code (if any) and then
1261 /// on to \arg Dest.
1263
1264 /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
1265 /// specified destination obviously has no cleanups to run. 'false' is always
1266 /// a conservatively correct answer for this method.
1268
1269 /// popCatchScope - Pops the catch scope at the top of the EHScope
1270 /// stack, emitting any required code (other than the catch handlers
1271 /// themselves).
1273
1274 llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
1276 llvm::BasicBlock *
1278
1279 /// An object to manage conditionally-evaluated expressions.
1281 llvm::BasicBlock *StartBB;
1282
1283 public:
1285 : StartBB(CGF.Builder.GetInsertBlock()) {}
1286
1288 assert(CGF.OutermostConditional != this);
1289 if (!CGF.OutermostConditional)
1290 CGF.OutermostConditional = this;
1291 }
1292
1294 assert(CGF.OutermostConditional != nullptr);
1295 if (CGF.OutermostConditional == this)
1296 CGF.OutermostConditional = nullptr;
1297 }
1298
1299 /// Returns a block which will be executed prior to each
1300 /// evaluation of the conditional code.
1301 llvm::BasicBlock *getStartingBlock() const {
1302 return StartBB;
1303 }
1304 };
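  // Illustrative sketch (editorial): a '?:'-style emitter brackets each arm so
  // that cleanups pushed inside it become conditional. `LHSBlock` and `LHS`
  // are assumed to be set up by the caller:
  //
  //   CodeGenFunction::ConditionalEvaluation Eval(CGF);
  //   CGF.EmitBlock(LHSBlock);
  //   Eval.begin(CGF);                            // enter conditional context
  //   llvm::Value *L = CGF.EmitScalarExpr(LHS);
  //   Eval.end(CGF);                              // leave conditional context
  //   // ... repeat for the RHS arm, then join ...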
1305
1306 /// isInConditionalBranch - Return true if we're currently emitting
1307 /// one branch or the other of a conditional expression.
1308 bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1309
1310 void setBeforeOutermostConditional(llvm::Value *value, Address addr,
1311 CodeGenFunction &CGF) {
1312 assert(isInConditionalBranch());
1313 llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
1314 auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF),
1315 block->back().getIterator());
1316 store->setAlignment(addr.getAlignment().getAsAlign());
1317 }
1318
1319 /// An RAII object to record that we're evaluating a statement
1320 /// expression.
1322 CodeGenFunction &CGF;
1323
1324 /// We have to save the outermost conditional: cleanups in a
1325 /// statement expression aren't conditional just because the
1326 /// StmtExpr is.
1327 ConditionalEvaluation *SavedOutermostConditional;
1328
1329 public:
1331 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1332 CGF.OutermostConditional = nullptr;
1333 }
1334
1336 CGF.OutermostConditional = SavedOutermostConditional;
1337 CGF.EnsureInsertPoint();
1338 }
1339 };
1340
1341 /// An object which temporarily prevents a value from being
1342 /// destroyed by aggressive peephole optimizations that assume that
1343 /// all uses of a value have been realized in the IR.
1345 llvm::Instruction *Inst = nullptr;
1346 friend class CodeGenFunction;
1347
1348 public:
1350 };
1351
1352 /// A non-RAII class containing all the information about a bound
1353 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1354 /// this which makes individual mappings very simple; using this
1355 /// class directly is useful when you have a variable number of
1356 /// opaque values or don't want the RAII functionality for some
1357 /// reason.
1359 const OpaqueValueExpr *OpaqueValue;
1360 bool BoundLValue;
1362
1364 bool boundLValue)
1365 : OpaqueValue(ov), BoundLValue(boundLValue) {}
1366 public:
1367 OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1368
1369 static bool shouldBindAsLValue(const Expr *expr) {
1370 // gl-values should be bound as l-values for obvious reasons.
1371 // Records should be bound as l-values because IR generation
1372 // always keeps them in memory. Expressions of function type
1373 // act exactly like l-values but are formally required to be
1374 // r-values in C.
1375 return expr->isGLValue() ||
1376 expr->getType()->isFunctionType() ||
1377 hasAggregateEvaluationKind(expr->getType());
1378 }
1379
1381 const OpaqueValueExpr *ov,
1382 const Expr *e) {
1383 if (shouldBindAsLValue(ov))
1384 return bind(CGF, ov, CGF.EmitLValue(e));
1385 return bind(CGF, ov, CGF.EmitAnyExpr(e));
1386 }
1387
1389 const OpaqueValueExpr *ov,
1390 const LValue &lv) {
1391 assert(shouldBindAsLValue(ov));
1392 CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1393 return OpaqueValueMappingData(ov, true);
1394 }
1395
1397 const OpaqueValueExpr *ov,
1398 const RValue &rv) {
1399 assert(!shouldBindAsLValue(ov));
1400 CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1401
1402 OpaqueValueMappingData data(ov, false);
1403
1404 // Work around an extremely aggressive peephole optimization in
1405 // EmitScalarConversion which assumes that all other uses of a
1406 // value are extant.
1407 data.Protection = CGF.protectFromPeepholes(rv);
1408
1409 return data;
1410 }
1411
1412 bool isValid() const { return OpaqueValue != nullptr; }
1413 void clear() { OpaqueValue = nullptr; }
1414
1416 assert(OpaqueValue && "no data to unbind!");
1417
1418 if (BoundLValue) {
1419 CGF.OpaqueLValues.erase(OpaqueValue);
1420 } else {
1421 CGF.OpaqueRValues.erase(OpaqueValue);
1422 CGF.unprotectFromPeepholes(Protection);
1423 }
1424 }
1425 };
1426
1427 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1429 CodeGenFunction &CGF;
1431
1432 public:
1433 static bool shouldBindAsLValue(const Expr *expr) {
1435 }
1436
1437 /// Build the opaque value mapping for the given conditional
1438 /// operator if it's the GNU ?: extension. This is a common
1439 /// enough pattern that the convenience operator is really
1440 /// helpful.
1441 ///
1443 const AbstractConditionalOperator *op) : CGF(CGF) {
1444 if (isa<ConditionalOperator>(op))
1445 // Leave Data empty.
1446 return;
1447
1448 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1450 e->getCommon());
1451 }
1452
1453 /// Build the opaque value mapping for an OpaqueValueExpr whose source
1454 /// expression is set to the expression the OVE represents.
1456 : CGF(CGF) {
1457 if (OV) {
1458 assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1459 "for OVE with no source expression");
1461 }
1462 }
1463
1465 const OpaqueValueExpr *opaqueValue,
1466 LValue lvalue)
1467 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1468 }
1469
1471 const OpaqueValueExpr *opaqueValue,
1472 RValue rvalue)
1473 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1474 }
1475
1476 void pop() {
1477 Data.unbind(CGF);
1478 Data.clear();
1479 }
1480
1482 if (Data.isValid()) Data.unbind(CGF);
1483 }
1484 };
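  // Illustrative usage (editorial): binding an OpaqueValueExpr for the
  // duration of the expression that contains it. `OVE` is assumed to satisfy
  // shouldBindAsLValue(), `LV` is its LValue, and `E` is the containing expr:
  //
  //   CodeGenFunction::OpaqueValueMapping Binding(CGF, OVE, LV);
  //   CGF.EmitLValue(E);  // uses of OVE inside E resolve to LV
  //   // the mapping is removed when Binding goes out of scope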
1485
1486private:
1487 CGDebugInfo *DebugInfo;
1488 /// Used to create unique names for artificial VLA size debug info variables.
1489 unsigned VLAExprCounter = 0;
1490 bool DisableDebugInfo = false;
1491
1492 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1493 /// calling llvm.stacksave for multiple VLAs in the same scope.
1494 bool DidCallStackSave = false;
1495
1496 /// IndirectBranch - The first time an indirect goto is seen we create a block
1497 /// with an indirect branch. Every time we see the address of a label taken,
1498 /// we add the label to the indirect goto. Every subsequent indirect goto is
1499 /// codegen'd as a jump to the IndirectBranch's basic block.
1500 llvm::IndirectBrInst *IndirectBranch = nullptr;
1501
1502 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1503 /// decls.
1504 DeclMapTy LocalDeclMap;
1505
1506 // Keep track of the cleanups for callee-destructed parameters pushed to the
1507 // cleanup stack so that they can be deactivated later.
1508 llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1509 CalleeDestructedParamCleanups;
1510
1511 /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1512 /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1513 /// parameter.
1514 llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1515 SizeArguments;
1516
1517 /// Track escaped local variables with auto storage. Used during SEH
1518 /// outlining to produce a call to llvm.localescape.
1519 llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1520
1521 /// LabelMap - This keeps track of the LLVM basic block for each C label.
1522 llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1523
1524 // BreakContinueStack - This keeps track of where break and continue
1525 // statements should jump to.
1526 struct BreakContinue {
1527 BreakContinue(JumpDest Break, JumpDest Continue)
1528 : BreakBlock(Break), ContinueBlock(Continue) {}
1529
1530 JumpDest BreakBlock;
1531 JumpDest ContinueBlock;
1532 };
1533 SmallVector<BreakContinue, 8> BreakContinueStack;
1534
1535 /// Handles cancellation exit points in OpenMP-related constructs.
1536 class OpenMPCancelExitStack {
1537 /// Tracks cancellation exit point and join point for cancel-related exit
1538 /// and normal exit.
1539 struct CancelExit {
1540 CancelExit() = default;
1541 CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1542 JumpDest ContBlock)
1543 : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1544 OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
1545 /// true if the exit block has been emitted already by the special
1546 /// emitExit() call, false if the default codegen is used.
1547 bool HasBeenEmitted = false;
1548 JumpDest ExitBlock;
1549 JumpDest ContBlock;
1550 };
1551
1552 SmallVector<CancelExit, 8> Stack;
1553
1554 public:
1555 OpenMPCancelExitStack() : Stack(1) {}
1556 ~OpenMPCancelExitStack() = default;
1557 /// Fetches the exit block for the current OpenMP construct.
1558 JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1559 /// Emits exit block with special codegen procedure specific for the related
1560 /// OpenMP construct + emits code for normal construct cleanup.
1561 void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1562 const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1563 if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1564 assert(CGF.getOMPCancelDestination(Kind).isValid());
1565 assert(CGF.HaveInsertPoint());
1566 assert(!Stack.back().HasBeenEmitted);
1567 auto IP = CGF.Builder.saveAndClearIP();
1568 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1569 CodeGen(CGF);
1570 CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1571 CGF.Builder.restoreIP(IP);
1572 Stack.back().HasBeenEmitted = true;
1573 }
1574 CodeGen(CGF);
1575 }
1576 /// Enter the cancel supporting \a Kind construct.
1577 /// \param Kind OpenMP directive that supports cancel constructs.
1578 /// \param HasCancel true, if the construct has inner cancel directive,
1579 /// false otherwise.
1580 void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
1581 Stack.push_back({Kind,
1582 HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
1583 : JumpDest(),
1584 HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
1585 : JumpDest()});
1586 }
1587 /// Emits default exit point for the cancel construct (if the special one
1588 /// has not been used) + join point for cancel/normal exits.
1589 void exit(CodeGenFunction &CGF) {
1590 if (getExitBlock().isValid()) {
1591 assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
1592 bool HaveIP = CGF.HaveInsertPoint();
1593 if (!Stack.back().HasBeenEmitted) {
1594 if (HaveIP)
1595 CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1596 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1597 CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1598 }
1599 CGF.EmitBlock(Stack.back().ContBlock.getBlock());
1600 if (!HaveIP) {
1601 CGF.Builder.CreateUnreachable();
1602 CGF.Builder.ClearInsertionPoint();
1603 }
1604 }
1605 Stack.pop_back();
1606 }
1607 };
1608 OpenMPCancelExitStack OMPCancelStack;
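  // Illustrative sketch (editorial): bracketing a cancellable OpenMP region
  // from inside a CodeGenFunction member, assuming the region is a 'sections'
  // construct containing a 'cancel' directive:
  //
  //   OMPCancelStack.enter(*this, llvm::omp::OMPD_sections, /*HasCancel=*/true);
  //   // ... emit the region body; 'cancel' branches to getExitBlock() ...
  //   OMPCancelStack.exit(*this);  // emit the default exit and join blocks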
1609
1610 /// Lower the Likelihood knowledge about the \p Cond via the llvm.expect intrinsic.
1611 llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
1612 Stmt::Likelihood LH);
1613
1614 CodeGenPGO PGO;
1615
1616 /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
1617 Address MCDCCondBitmapAddr = Address::invalid();
1618
1619 /// Calculate branch weights appropriate for PGO data
1620 llvm::MDNode *createProfileWeights(uint64_t TrueCount,
1621 uint64_t FalseCount) const;
1622 llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
1623 llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1624 uint64_t LoopCount) const;
1625
1626public:
1627 auto getIsCounterPair(const Stmt *S) const { return PGO.getIsCounterPair(S); }
1628
1629 void markStmtAsUsed(bool Skipped, const Stmt *S) {
1630 PGO.markStmtAsUsed(Skipped, S);
1631 }
1632 void markStmtMaybeUsed(const Stmt *S) { PGO.markStmtMaybeUsed(S); }
1633
1634 /// Increment the profiler's counter for the given statement by \p StepV.
1635 /// If \p StepV is null, the default increment is 1.
1636 void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1638 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
1639 !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile)) {
1640 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1641 PGO.emitCounterSetOrIncrement(Builder, S, StepV);
1642 }
1643 PGO.setCurrentStmt(S);
1644 }
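  // Illustrative sketch (editorial): how a statement emitter typically uses
  // the counters declared here, assuming `S` is the statement for the taken
  // branch of an if:
  //
  //   incrementProfileCounter(S);               // instrumentation counter
  //   uint64_t ThenCount = getProfileCount(S);  // counts from a profile run
  //   // ThenCount can then feed createProfileWeights() for branch metadata.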
1645
1648 CGM.getCodeGenOpts().MCDCCoverage &&
1649 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
1650 }
1651
1652 /// Allocate a temp value on the stack that MCDC can use to track condition
1653 /// results.
1655 if (isMCDCCoverageEnabled()) {
1656 PGO.emitMCDCParameters(Builder);
1657 MCDCCondBitmapAddr =
1658 CreateIRTemp(getContext().UnsignedIntTy, "mcdc.addr");
1659 }
1660 }
1661
1662 bool isBinaryLogicalOp(const Expr *E) const {
1663 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
1664 return (BOp && BOp->isLogicalOp());
1665 }
1666
1667 /// Zero-init the MCDC temp value.
1670 PGO.emitMCDCCondBitmapReset(Builder, E, MCDCCondBitmapAddr);
1671 PGO.setCurrentStmt(E);
1672 }
1673 }
1674
1675 /// Increment the profiler's counter for the given expression by \p StepV.
1676 /// If \p StepV is null, the default increment is 1.
1679 PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr, *this);
1680 PGO.setCurrentStmt(E);
1681 }
1682 }
1683
1684 /// Update the MCDC temp value with the condition's evaluated result.
1685 void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val) {
1686 if (isMCDCCoverageEnabled()) {
1687 PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val, *this);
1688 PGO.setCurrentStmt(E);
1689 }
1690 }
1691
1692 /// Get the profiler's count for the given statement.
1693 uint64_t getProfileCount(const Stmt *S) {
1694 return PGO.getStmtCount(S).value_or(0);
1695 }
1696
1697 /// Set the profiler's current count.
1698 void setCurrentProfileCount(uint64_t Count) {
1699 PGO.setCurrentRegionCount(Count);
1700 }
1701
1702 /// Get the profiler's current count. This is generally the count for the most
1703 /// recently incremented counter.
1705 return PGO.getCurrentRegionCount();
1706 }
1707
1708private:
1709
1710 /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
1711 /// if the current context is not in a switch.
1712 llvm::SwitchInst *SwitchInsn = nullptr;
1713 /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1714 SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1715
1716 /// The likelihood attributes of the SwitchCase.
1717 SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;
1718
1720 /// CaseRangeBlock - This block holds the condition check for the last case
1721 /// statement range in the current switch instruction.
1721 llvm::BasicBlock *CaseRangeBlock = nullptr;
1722
1723 /// OpaqueLValues - Keeps track of the current set of opaque value
1724 /// expressions.
1725 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1726 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1727
1728 // VLASizeMap - This keeps track of the associated size for each VLA type.
1729 // We track this by the size expression rather than the type itself because
1730 // in certain situations, like a const qualifier applied to a VLA typedef,
1731 // multiple VLA types can share the same size expression.
1732 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1733 // enter/leave scopes.
1734 llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
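  // Editorial example (C source): both arrays below are declared through the
  // same VLA typedef, so they share the size expression `n * 2` and therefore
  // a single VLASizeMap entry:
  //
  //   void f(int n) {
  //     typedef int VLA[n * 2];  // the size expression is evaluated once
  //     VLA a, b;                // both arrays reuse that cached size
  //   }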
1735
1736 /// A block containing a single 'unreachable' instruction. Created
1737 /// lazily by getUnreachableBlock().
1738 llvm::BasicBlock *UnreachableBlock = nullptr;
1739
1740 /// Count of the number of return expressions in the function.
1741 unsigned NumReturnExprs = 0;
1742
1743 /// Count the number of simple (constant) return expressions in the function.
1744 unsigned NumSimpleReturnExprs = 0;
1745
1746 /// The last regular (non-return) debug location (breakpoint) in the function.
1747 SourceLocation LastStopPoint;
1748
1749public:
1750 /// Source location information about the default argument or member
1751 /// initializer expression we're evaluating, if any.
1755
1756 /// A scope within which we are constructing the fields of an object which
1757 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1758 /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1760 public:
1762 : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1763 CGF.CXXDefaultInitExprThis = This;
1764 }
1766 CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1767 }
1768
1769 private:
1770 CodeGenFunction &CGF;
1771 Address OldCXXDefaultInitExprThis;
1772 };
1773
1774 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1775 /// is overridden to be the object under construction.
1777 public:
1779 : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1780 OldCXXThisAlignment(CGF.CXXThisAlignment),
1782 CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
1783 CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1784 }
1786 CGF.CXXThisValue = OldCXXThisValue;
1787 CGF.CXXThisAlignment = OldCXXThisAlignment;
1788 }
1789
1790 public:
1792 llvm::Value *OldCXXThisValue;
1795 };
1796
1800 };
1801
1802 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1803 /// current loop index is overridden.
1805 public:
1806 ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1807 : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1808 CGF.ArrayInitIndex = Index;
1809 }
1811 CGF.ArrayInitIndex = OldArrayInitIndex;
1812 }
1813
1814 private:
1815 CodeGenFunction &CGF;
1816 llvm::Value *OldArrayInitIndex;
1817 };
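// A minimal usage sketch, assuming a CodeGenFunction &CGF and an
// llvm::Value *IndexV holding the current element index:
//
//   {
//     ArrayInitLoopExprScope Scope(CGF, IndexV);
//     // Within this scope, ArrayInitIndexExprs read IndexV via
//     // CGF.getArrayInitIndex().
//   } // the previous index is restored on scope exit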
1818
1820 public:
1822 : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1823 OldCurCodeDecl(CGF.CurCodeDecl),
1824 OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1825 OldCXXABIThisValue(CGF.CXXABIThisValue),
1826 OldCXXThisValue(CGF.CXXThisValue),
1827 OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1828 OldCXXThisAlignment(CGF.CXXThisAlignment),
1829 OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1830 OldCXXInheritedCtorInitExprArgs(
1831 std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1832 CGF.CurGD = GD;
1833 CGF.CurFuncDecl = CGF.CurCodeDecl =
1834 cast<CXXConstructorDecl>(GD.getDecl());
1835 CGF.CXXABIThisDecl = nullptr;
1836 CGF.CXXABIThisValue = nullptr;
1837 CGF.CXXThisValue = nullptr;
1838 CGF.CXXABIThisAlignment = CharUnits();
1839 CGF.CXXThisAlignment = CharUnits();
1841 CGF.FnRetTy = QualType();
1842 CGF.CXXInheritedCtorInitExprArgs.clear();
1843 }
1845 CGF.CurGD = OldCurGD;
1846 CGF.CurFuncDecl = OldCurFuncDecl;
1847 CGF.CurCodeDecl = OldCurCodeDecl;
1848 CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1849 CGF.CXXABIThisValue = OldCXXABIThisValue;
1850 CGF.CXXThisValue = OldCXXThisValue;
1851 CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1852 CGF.CXXThisAlignment = OldCXXThisAlignment;
1853 CGF.ReturnValue = OldReturnValue;
1854 CGF.FnRetTy = OldFnRetTy;
1855 CGF.CXXInheritedCtorInitExprArgs =
1856 std::move(OldCXXInheritedCtorInitExprArgs);
1857 }
1858
1859 private:
1860 CodeGenFunction &CGF;
1861 GlobalDecl OldCurGD;
1862 const Decl *OldCurFuncDecl;
1863 const Decl *OldCurCodeDecl;
1864 ImplicitParamDecl *OldCXXABIThisDecl;
1865 llvm::Value *OldCXXABIThisValue;
1866 llvm::Value *OldCXXThisValue;
1867 CharUnits OldCXXABIThisAlignment;
1868 CharUnits OldCXXThisAlignment;
1869 Address OldReturnValue;
1870 QualType OldFnRetTy;
1871 CallArgList OldCXXInheritedCtorInitExprArgs;
1872 };
1873
1874 // Helper class for the OpenMP IR Builder. Allows reuse of the code used for
1875 // region body and finalization codegen callbacks. This class will also
1876 // contain privatization functions used by the privatization callbacks.
1877 //
1878 // TODO: this is a temporary class for things that are being moved out of
1879 // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
1880 // utility functions for use with the OMPBuilder. Once the move to the
1881 // OMPBuilder is done, everything here will either become part of
1882 // CodeGenFunction directly, or a new helper class that will contain
1883 // functions used by both this and the OMPBuilder.
1884
1886
1890
1891 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1892
1893 /// Cleanup action for allocate support.
1895
1896 private:
1897 llvm::CallInst *RTLFnCI;
1898
1899 public:
1900 OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
1901 RLFnCI->removeFromParent();
1902 }
1903
1904 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
1905 if (!CGF.HaveInsertPoint())
1906 return;
1907 CGF.Builder.Insert(RTLFnCI);
1908 }
1909 };
1910
1911 /// Returns the address of the threadprivate variable for the current
1912 /// thread. This also creates any necessary OMP runtime calls.
1913 ///
1914 /// \param VD VarDecl for the threadprivate variable.
1915 /// \param VDAddr Address of the VarDecl.
1916 /// \param Loc The location where the directive was encountered.
1918 const VarDecl *VD, Address VDAddr,
1920
1921 /// Gets the OpenMP-specific address of the local variable \p VD.
1923 const VarDecl *VD);
1924 /// Get the platform-specific name separator.
1926 /// \param Parts different parts of the final name that need separation
1926 /// \param FirstSeparator First separator used between the initial two
1927 /// parts of the name.
1928 /// \param Separator separator used between all of the remaining consecutive
1929 /// parts of the name
1930 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
1931 StringRef FirstSeparator = ".",
1932 StringRef Separator = ".");
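// For illustration (hypothetical arguments), following the parameter
// documentation above, the default "." separators join the parts into a
// dotted name:
//
//   std::string Name =
//       getNameWithSeparators({"omp", "region", "body"}); // "omp.region.body"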
1933 /// Emit the Finalization for an OMP region
1934 /// \param CGF The Codegen function this belongs to
1935 /// \param IP Insertion point for generating the finalization code.
1937 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1938 assert(IP.getBlock()->end() != IP.getPoint() &&
1939 "OpenMP IR Builder should cause terminated block!");
1940
1941 llvm::BasicBlock *IPBB = IP.getBlock();
1942 llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
1943 assert(DestBB && "Finalization block should have one successor!");
1944
1945 // erase and replace with cleanup branch.
1946 IPBB->getTerminator()->eraseFromParent();
1947 CGF.Builder.SetInsertPoint(IPBB);
1949 CGF.EmitBranchThroughCleanup(Dest);
1950 }
1951
1952 /// Emit the body of an OMP region
1953 /// \param CGF The Codegen function this belongs to
1954 /// \param RegionBodyStmt The body statement for the OpenMP region being
1955 /// generated
1956 /// \param AllocaIP Where to insert alloca instructions
1957 /// \param CodeGenIP Where to insert the region code
1958 /// \param RegionName Name to be used for new blocks
1960 const Stmt *RegionBodyStmt,
1961 InsertPointTy AllocaIP,
1962 InsertPointTy CodeGenIP,
1963 Twine RegionName);
1964
1965 static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
1966 llvm::BasicBlock &FiniBB, llvm::Function *Fn,
1968 llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
1969 if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
1970 CodeGenIPBBTI->eraseFromParent();
1971
1972 CGF.Builder.SetInsertPoint(CodeGenIPBB);
1973
1974 if (Fn->doesNotThrow())
1975 CGF.EmitNounwindRuntimeCall(Fn, Args);
1976 else
1977 CGF.EmitRuntimeCall(Fn, Args);
1978
1979 if (CGF.Builder.saveIP().isSet())
1980 CGF.Builder.CreateBr(&FiniBB);
1981 }
1982
1983 /// Emit the body of an OMP region that will be outlined in
1984 /// OpenMPIRBuilder::finalize().
1985 /// \param CGF The Codegen function this belongs to
1986 /// \param RegionBodyStmt The body statement for the OpenMP region being
1987 /// generated
1988 /// \param AllocaIP Where to insert alloca instructions
1989 /// \param CodeGenIP Where to insert the region code
1990 /// \param RegionName Name to be used for new blocks
1992 const Stmt *RegionBodyStmt,
1993 InsertPointTy AllocaIP,
1994 InsertPointTy CodeGenIP,
1995 Twine RegionName);
1996
1997 /// RAII for preserving necessary info during Outlined region body codegen.
1999
2000 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2001 CodeGenFunction::JumpDest OldReturnBlock;
2002 CodeGenFunction &CGF;
2003
2004 public:
2006 llvm::BasicBlock &RetBB)
2007 : CGF(cgf) {
2008 assert(AllocaIP.isSet() &&
2009 "Must specify Insertion point for allocas of outlined function");
2010 OldAllocaIP = CGF.AllocaInsertPt;
2011 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2012
2013 OldReturnBlock = CGF.ReturnBlock;
2014 CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
2015 }
2016
2018 CGF.AllocaInsertPt = OldAllocaIP;
2019 CGF.ReturnBlock = OldReturnBlock;
2020 }
2021 };
2022
2023 /// RAII for preserving necessary info during inlined region body codegen.
2025
2026 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2027 CodeGenFunction &CGF;
2028
2029 public:
2031 llvm::BasicBlock &FiniBB)
2032 : CGF(cgf) {
2033 // The alloca insertion block should be in the entry block of the containing
2034 // function, so this expects either an empty AllocaIP (in which case the old
2035 // alloca insertion point is reused) or a new AllocaIP in the same block as
2036 // the old one.
2037 assert((!AllocaIP.isSet() ||
2038 CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
2039 "Insertion point should be in the entry block of containing "
2040 "function!");
2041 OldAllocaIP = CGF.AllocaInsertPt;
2042 if (AllocaIP.isSet())
2043 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2044
2045 // TODO: Remove the call, after making sure the counter is not used by
2046 // the EHStack.
2047 // Since this is an inlined region, it should not modify the
2048 // ReturnBlock, and should reuse the one for the enclosing outlined
2049 // region. So, the JumpDest returned by the function is discarded.
2050 (void)CGF.getJumpDestInCurrentScope(&FiniBB);
2051 }
2052
2054 };
2055 };
2056
2057private:
2058 /// CXXABIThisDecl - When generating code for a C++ member function,
2059 /// this will hold the implicit 'this' declaration.
2060 ImplicitParamDecl *CXXABIThisDecl = nullptr;
2061 llvm::Value *CXXABIThisValue = nullptr;
2062 llvm::Value *CXXThisValue = nullptr;
2063 CharUnits CXXABIThisAlignment;
2064 CharUnits CXXThisAlignment;
2065
2066 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
2067 /// this expression.
2068 Address CXXDefaultInitExprThis = Address::invalid();
2069
2070 /// The current array initialization index when evaluating an
2071 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
2072 llvm::Value *ArrayInitIndex = nullptr;
2073
2074 /// The values of function arguments to use when evaluating
2075 /// CXXInheritedCtorInitExprs within this context.
2076 CallArgList CXXInheritedCtorInitExprArgs;
2077
2078 /// CXXStructorImplicitParamDecl - When generating code for a constructor or
2079 /// destructor, this will hold the implicit argument (e.g. VTT).
2080 ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
2081 llvm::Value *CXXStructorImplicitParamValue = nullptr;
2082
2083 /// OutermostConditional - Points to the outermost active
2084 /// conditional control. This is used so that we know if a
2085 /// temporary should be destroyed conditionally.
2086 ConditionalEvaluation *OutermostConditional = nullptr;
2087
2088 /// The current lexical scope.
2089 LexicalScope *CurLexicalScope = nullptr;
2090
2091 /// The current source location that should be used for exception
2092 /// handling code.
2093 SourceLocation CurEHLocation;
2094
2095 /// BlockByrefInfos - For each __block variable, contains
2096 /// information about the layout of the variable.
2097 llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
2098
2099 /// Used by -fsanitize=nullability-return to determine whether the return
2100 /// value can be checked.
2101 llvm::Value *RetValNullabilityPrecondition = nullptr;
2102
2103 /// Check if -fsanitize=nullability-return instrumentation is required for
2104 /// this function.
2105 bool requiresReturnValueNullabilityCheck() const {
2106 return RetValNullabilityPrecondition;
2107 }
2108
2109 /// Used to store precise source locations for return statements by the
2110 /// runtime return value checks.
2111 Address ReturnLocation = Address::invalid();
2112
2113 /// Check if the return value of this function requires sanitization.
2114 bool requiresReturnValueCheck() const;
2115
2116 bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
2117 bool hasInAllocaArg(const CXXMethodDecl *MD);
2118
2119 llvm::BasicBlock *TerminateLandingPad = nullptr;
2120 llvm::BasicBlock *TerminateHandler = nullptr;
2122
2123 /// Terminate funclets keyed by parent funclet pad.
2124 llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
2125
2126 /// Largest vector width used in this function. Will be used to create a
2127 /// function attribute.
2128 unsigned LargestVectorWidth = 0;
2129
2130 /// True if we need to emit the lifetime markers. This is initially set in
2131 /// the constructor, but could be overwritten to true if this is a coroutine.
2132 bool ShouldEmitLifetimeMarkers;
2133
2134 /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
2135 /// the function metadata.
2136 void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
2137
2138public:
2139 CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
2141
2142 CodeGenTypes &getTypes() const { return CGM.getTypes(); }
2143 ASTContext &getContext() const { return CGM.getContext(); }
2145 if (DisableDebugInfo)
2146 return nullptr;
2147 return DebugInfo;
2148 }
2149 void disableDebugInfo() { DisableDebugInfo = true; }
2150 void enableDebugInfo() { DisableDebugInfo = false; }
2151
2153 return CGM.getCodeGenOpts().OptimizationLevel == 0;
2154 }
2155
2156 const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
2157
2158 /// Returns a pointer to the function's exception object and selector slot,
2159 /// which is assigned in every landing pad.
2162
2163 /// Returns the contents of the function's exception object and selector
2164 /// slots.
2165 llvm::Value *getExceptionFromSlot();
2166 llvm::Value *getSelectorFromSlot();
2167
2169
2170 llvm::BasicBlock *getUnreachableBlock() {
2171 if (!UnreachableBlock) {
2172 UnreachableBlock = createBasicBlock("unreachable");
2173 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
2174 }
2175 return UnreachableBlock;
2176 }
2177
2178 llvm::BasicBlock *getInvokeDest() {
2179 if (!EHStack.requiresLandingPad()) return nullptr;
2180 return getInvokeDestImpl();
2181 }
2182
2183 bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
2184
2185 const TargetInfo &getTarget() const { return Target; }
2186 llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
2188 return CGM.getTargetCodeGenInfo();
2189 }
2190
2191 //===--------------------------------------------------------------------===//
2192 // Cleanups
2193 //===--------------------------------------------------------------------===//
2194
2195 typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
2196
2197 void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2198 Address arrayEndPointer,
2199 QualType elementType,
2200 CharUnits elementAlignment,
2201 Destroyer *destroyer);
2202 void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2203 llvm::Value *arrayEnd,
2204 QualType elementType,
2205 CharUnits elementAlignment,
2206 Destroyer *destroyer);
2207
2209 Address addr, QualType type);
2211 Address addr, QualType type);
2213 Destroyer *destroyer, bool useEHCleanupForArray);
2215 Address addr, QualType type);
2217 QualType type, Destroyer *destroyer,
2218 bool useEHCleanupForArray);
2220 QualType type, Destroyer *destroyer,
2221 bool useEHCleanupForArray);
2222 void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
2223 llvm::Value *CompletePtr,
2224 QualType ElementType);
2227 std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
2229 bool useEHCleanupForArray);
2231 Destroyer *destroyer,
2232 bool useEHCleanupForArray,
2233 const VarDecl *VD);
2234 void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
2235 QualType elementType, CharUnits elementAlign,
2236 Destroyer *destroyer,
2237 bool checkZeroLength, bool useEHCleanup);
2238
2240
2241 /// Determines whether an EH cleanup is required to destroy a type
2242 /// with the given destruction kind.
2244 switch (kind) {
2245 case QualType::DK_none:
2246 return false;
2250 return getLangOpts().Exceptions;
2252 return getLangOpts().Exceptions &&
2253 CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
2254 }
2255 llvm_unreachable("bad destruction kind");
2256 }
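// A hedged sketch of the typical pattern around this predicate, assuming an
// Address Addr and its QualType Ty (pushDestroy is the conventional name of
// one of the cleanup-pushing overloads declared above, whose identifiers are
// elided in this listing):
//
//   if (QualType::DestructionKind DtorKind = Ty.isDestructedType()) {
//     pushDestroy(DtorKind, Addr, Ty);   // assumed overload name
//     // needsEHCleanup(DtorKind) decides whether an EH-only cleanup is also
//     // required when exceptions are enabled.
//   }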
2257
2260 }
2261
2262 //===--------------------------------------------------------------------===//
2263 // Objective-C
2264 //===--------------------------------------------------------------------===//
2265
2267
2269
2270 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
2272 const ObjCPropertyImplDecl *PID);
2274 const ObjCPropertyImplDecl *propImpl,
2275 const ObjCMethodDecl *GetterMethodDecl,
2276 llvm::Constant *AtomicHelperFn);
2277
2279 ObjCMethodDecl *MD, bool ctor);
2280
2281 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
2282 /// for the given property.
2284 const ObjCPropertyImplDecl *PID);
2286 const ObjCPropertyImplDecl *propImpl,
2287 llvm::Constant *AtomicHelperFn);
2288
2289 //===--------------------------------------------------------------------===//
2290 // Block Bits
2291 //===--------------------------------------------------------------------===//
2292
2293 /// Emit block literal.
2294 /// \return an LLVM value which is a pointer to a struct which contains
2295 /// information about the block, including the block invoke function, the
2296 /// captured variables, etc.
2297 llvm::Value *EmitBlockLiteral(const BlockExpr *);
2298
2300 const CGBlockInfo &Info,
2301 const DeclMapTy &ldm,
2302 bool IsLambdaConversionToBlock,
2303 bool BuildGlobalBlock);
2304
2305 /// Check if \p T is a C++ class that has a destructor that can throw.
2307
2308 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
2309 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
2311 const ObjCPropertyImplDecl *PID);
2313 const ObjCPropertyImplDecl *PID);
2314 llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
2315
2316 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
2317 bool CanThrow);
2318
2319 class AutoVarEmission;
2320
2322
2323 /// Enter a cleanup to destroy a __block variable. Note that this
2324 /// cleanup should be a no-op if the variable hasn't left the stack
2325 /// yet; if a cleanup is required for the variable itself, that needs
2326 /// to be done externally.
2327 ///
2328 /// \param Kind Cleanup kind.
2329 ///
2330 /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
2331 /// structure that will be passed to _Block_object_dispose. When
2332 /// \p LoadBlockVarAddr is true, the address of the field of the block
2333 /// structure that holds the address of the __block structure.
2334 ///
2335 /// \param Flags The flag that will be passed to _Block_object_dispose.
2336 ///
2337 /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
2338 /// \p Addr to get the address of the __block structure.
2340 bool LoadBlockVarAddr, bool CanThrow);
2341
2342 void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
2343 llvm::Value *ptr);
2344
2347
2348 /// BuildBlockByrefAddress - Computes the location of the
2349 /// data in a variable which is declared as __block.
2351 bool followForward = true);
2353 const BlockByrefInfo &info,
2354 bool followForward,
2355 const llvm::Twine &name);
2356
2358
2360
2361 void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
2362 const CGFunctionInfo &FnInfo);
2363
2364 /// Annotate the function with an attribute that disables TSan checking at
2365 /// runtime.
2366 void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
2367
2368 /// Emit code for the start of a function.
2369 /// \param Loc The location to be associated with the function.
2370 /// \param StartLoc The location of the function body.
2372 QualType RetTy,
2373 llvm::Function *Fn,
2374 const CGFunctionInfo &FnInfo,
2375 const FunctionArgList &Args,
2377 SourceLocation StartLoc = SourceLocation());
2378
2380
2384 void EmitFunctionBody(const Stmt *Body);
2385 void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
2386
2387 void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
2388 CallArgList &CallArgs,
2389 const CGFunctionInfo *CallOpFnInfo = nullptr,
2390 llvm::Constant *CallOpFn = nullptr);
2394 CallArgList &CallArgs);
2396 const CGFunctionInfo **ImplFnInfo,
2397 llvm::Function **ImplFn);
2400 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2401 }
2402 void EmitAsanPrologueOrEpilogue(bool Prologue);
2403
2404 /// Emit the unified return block, trying to avoid its emission when
2405 /// possible.
2406 /// \return The debug location of the user written return statement if the
2407 /// return block is avoided.
2408 llvm::DebugLoc EmitReturnBlock();
2409
2410 /// FinishFunction - Complete IR generation of the current function. It is
2411 /// legal to call this function even if there is no current insertion point.
2413
2414 void StartThunk(llvm::Function *Fn, GlobalDecl GD,
2415 const CGFunctionInfo &FnInfo, bool IsUnprototyped);
2416
2417 void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
2418 const ThunkInfo *Thunk, bool IsUnprototyped);
2419
2421
2422 /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
2423 void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
2424 llvm::FunctionCallee Callee);
2425
2426 /// Generate a thunk for the given method.
2427 void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
2428 GlobalDecl GD, const ThunkInfo &Thunk,
2429 bool IsUnprototyped);
2430
2431 llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
2432 const CGFunctionInfo &FnInfo,
2433 GlobalDecl GD, const ThunkInfo &Thunk);
2434
2436 FunctionArgList &Args);
2437
2439
2440 /// Struct with all information about dynamic [sub]class needed to set vptr.
2441 struct VPtr {
2446 };
2447
2448 /// Initialize the vtable pointer of the given subobject.
2450
2452
2455
2457 CharUnits OffsetFromNearestVBase,
2458 bool BaseIsNonVirtualPrimaryBase,
2459 const CXXRecordDecl *VTableClass,
2460 VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
2461
2463
2464 // VTableAuthMode - whether we guarantee that loading the
2465 // vtable will trap on authentication failure,
2466 // even if the resulting vtable pointer is unused.
2467 enum class VTableAuthMode {
2469 MustTrap,
2470 UnsafeUbsanStrip // Should only be used for Vptr UBSan check
2471 };
2472 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
2473 /// to by This.
2474 llvm::Value *
2475 GetVTablePtr(Address This, llvm::Type *VTableTy,
2476 const CXXRecordDecl *VTableClass,
2478
2487 };
2488
2489 /// Derived is the presumed address of an object of type T after a
2490 /// cast. If T is a polymorphic class type, emit a check that the virtual
2491 /// table for Derived belongs to a class derived from T.
2492 void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
2494
2495 /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
2496 /// If vptr CFI is enabled, emit a check that VTable is valid.
2497 void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
2499
2500 /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
2501 /// RD using llvm.type.test.
2502 void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
2504
2505 /// If whole-program virtual table optimization is enabled, emit an assumption
2506 /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
2507 /// enabled, emit a check that VTable is a member of RD's type identifier.
2509 llvm::Value *VTable, SourceLocation Loc);
2510
2511 /// Returns whether we should perform a type checked load when loading a
2512 /// virtual function for virtual calls to members of RD. This is generally
2513 /// true when both vcall CFI and whole-program-vtables are enabled.
2515
2516 /// Emit a type checked load from the given vtable.
2518 llvm::Value *VTable,
2519 llvm::Type *VTableTy,
2520 uint64_t VTableByteOffset);
2521
2522 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
2523 /// given phase of destruction for a destructor. The end result
2524 /// should call destructors on members and base classes in reverse
2525 /// order of their construction.
2527
2528 /// ShouldInstrumentFunction - Return true if the current function should be
2529 /// instrumented with __cyg_profile_func_* calls
2531
2532 /// ShouldSkipSanitizerInstrumentation - Return true if the current function
2533 /// should not be instrumented with sanitizers.
2535
2536 /// ShouldXRayInstrument - Return true if the current function should be
2537 /// instrumented with XRay nop sleds.
2539
2540 /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
2541 /// XRay custom event handling calls.
2543
2544 /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
2545 /// XRay typed event handling calls.
2547
2548 /// Return a type hash constant for a function instrumented by
2549 /// -fsanitize=function.
2550 llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;
2551
2552 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
2553 /// arguments for the given function. This is also responsible for naming the
2554 /// LLVM function arguments.
2556 llvm::Function *Fn,
2557 const FunctionArgList &Args);
2558
2559 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
2560 /// given temporary.
2561 void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
2562 SourceLocation EndLoc);
2563
2564 /// Emit a test that checks if the return value \p RV is nonnull.
2565 void EmitReturnValueCheck(llvm::Value *RV);
2566
2567 /// EmitStartEHSpec - Emit the start of the exception spec.
2568 void EmitStartEHSpec(const Decl *D);
2569
2570 /// EmitEndEHSpec - Emit the end of the exception spec.
2571 void EmitEndEHSpec(const Decl *D);
2572
2573 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2574 llvm::BasicBlock *getTerminateLandingPad();
2575
2576 /// getTerminateFunclet - Return a cleanup funclet that just calls
2577 /// terminate.
2578 llvm::BasicBlock *getTerminateFunclet();
2579
2580 /// getTerminateHandler - Return a handler (not a landing pad, just
2581 /// a catch handler) that just calls terminate. This is used when
2582 /// a terminate scope encloses a try.
2583 llvm::BasicBlock *getTerminateHandler();
2584
2586 llvm::Type *ConvertType(QualType T);
2588 llvm::Type *LLVMTy = nullptr);
2589 llvm::Type *ConvertType(const TypeDecl *T) {
2590 return ConvertType(getContext().getTypeDeclType(T));
2591 }
2592
2593 /// LoadObjCSelf - Load the value of self. This function is only valid while
2594 /// generating code for an Objective-C method.
2595 llvm::Value *LoadObjCSelf();
2596
2597 /// TypeOfSelfObject - Return type of object that this self represents.
2599
2600 /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2602
2604 return getEvaluationKind(T) == TEK_Scalar;
2605 }
2606
2609 }
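// A small dispatch sketch over the three evaluation kinds, with E a
// hypothetical expression (TEK_Complex and TEK_Aggregate are assumed to be
// the sibling enumerators of TEK_Scalar; the enum itself is not shown here):
//
//   switch (getEvaluationKind(E->getType())) {
//   case TEK_Scalar:    /* scalar emission */    break;
//   case TEK_Complex:   /* complex emission */   break;
//   case TEK_Aggregate: /* aggregate emission */ break;
//   }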
2610
2611 /// createBasicBlock - Create an LLVM basic block.
2612 llvm::BasicBlock *createBasicBlock(const Twine &name = "",
2613 llvm::Function *parent = nullptr,
2614 llvm::BasicBlock *before = nullptr) {
2615 return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
2616 }
2617
2618 /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
2619 /// label maps to.
2621
2622 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2623 /// another basic block, simplify it. This assumes that no other code could
2624 /// potentially reference the basic block.
2625 void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2626
2627 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2628 /// adding a fall-through branch from the current insert block if
2629 /// necessary. It is legal to call this function even if there is no current
2630 /// insertion point.
2631 ///
2632 /// IsFinished - If true, indicates that the caller has finished emitting
2633 /// branches to the given block and does not expect to emit code into it. This
2634 /// means the block can be ignored if it is unreachable.
2635 void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
2636
2637 /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2638 /// near its uses, and leave the insertion point in it.
2639 void EmitBlockAfterUses(llvm::BasicBlock *BB);
2640
2641 /// EmitBranch - Emit a branch to the specified basic block from the current
2642 /// insert block, taking care to avoid creation of branches from dummy
2643 /// blocks. It is legal to call this function even if there is no current
2644 /// insertion point.
2645 ///
2646 /// This function clears the current insertion point. The caller should follow
2647 /// calls to this function with calls to Emit*Block prior to generating new
2648 /// code.
2649 void EmitBranch(llvm::BasicBlock *Block);
2650
2651 /// HaveInsertPoint - True if an insertion point is defined. If not, this
2652 /// indicates that the current code being emitted is unreachable.
2653 bool HaveInsertPoint() const {
2654 return Builder.GetInsertBlock() != nullptr;
2655 }
2656
2657 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2658 /// emitted IR has a place to go. Note that by definition, if this function
2659 /// creates a block then that block is unreachable; callers may do better to
2660 /// detect when no insertion point is defined and simply skip IR generation.
2662 if (!HaveInsertPoint())
2664 }
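// A common emission pattern built from the primitives above (Cond is a
// hypothetical i1 value that has already been computed):
//
//   llvm::BasicBlock *ThenBB = createBasicBlock("then");
//   llvm::BasicBlock *ContBB = createBasicBlock("cont");
//   Builder.CreateCondBr(Cond, ThenBB, ContBB);
//   EmitBlock(ThenBB);
//   // ... emit the guarded code ...
//   EmitBranch(ContBB);   // clears the insertion point
//   EmitBlock(ContBB);    // re-establishes it at the continuation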
2665
2666 /// ErrorUnsupported - Print out an error that codegen doesn't support the
2667 /// specified stmt yet.
2668 void ErrorUnsupported(const Stmt *S, const char *Type);
2669
2670 //===--------------------------------------------------------------------===//
2671 // Helpers
2672 //===--------------------------------------------------------------------===//
2673
2675 llvm::BasicBlock *LHSBlock,
2676 llvm::BasicBlock *RHSBlock,
2677 llvm::BasicBlock *MergeBlock,
2678 QualType MergedType) {
2679 Builder.SetInsertPoint(MergeBlock);
2680 llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
2681 PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
2682 PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
2683 LHS.replaceBasePointer(PtrPhi);
2684 LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
2685 return LHS;
2686 }
2687
2688 /// Construct an address with the natural alignment of T. If a pointer to T
2689 /// is expected to be signed, the pointer passed to this function must have
2690 /// been signed, and the returned Address will have the pointer authentication
2691 /// information needed to authenticate the signed pointer.
2693 llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
2694 bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
2695 TBAAAccessInfo *TBAAInfo = nullptr,
2696 KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
2697 if (Alignment.isZero())
2698 Alignment =
2699 CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
2700 return Address(Ptr, ConvertTypeForMem(T), Alignment,
2701 CGM.getPointerAuthInfoForPointeeType(T), /*Offset=*/nullptr,
2702 IsKnownNonNull);
2703 }
2704
2707 return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
2709 }
2710
2712 TBAAAccessInfo TBAAInfo) {
2713 return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2714 }
2715
2716 LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2718 return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
2720 }
2721
2722 /// Same as MakeAddrLValue above except that the pointer is known to be
2723 /// unsigned.
2724 LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2726 Address Addr(V, ConvertTypeForMem(T), Alignment);
2727 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2729 }
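// A hedged sketch tying these helpers together, assuming a raw llvm::Value
// *Ptr of pointer type and its pointee QualType T, and assuming the
// Address-taking MakeAddrLValue overload (partially shown above) defaults its
// alignment source:
//
//   Address A = makeNaturalAddressForPointer(Ptr, T); // derives natural alignment
//   LValue LV = MakeAddrLValue(A, T);                 // wraps it as an lvalue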
2730
2731 LValue
2734 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2735 TBAAAccessInfo());
2736 }
2737
2738 /// Given a value of type T* that may not point to a complete object, construct
2739 /// an l-value with the natural pointee alignment of T.
2741
2742 LValue
2744 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
2745
2746 /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
2747 /// to be unsigned.
2749
2751
2753 LValueBaseInfo *PointeeBaseInfo = nullptr,
2754 TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2757 AlignmentSource Source =
2759 LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2760 CGM.getTBAAAccessInfo(RefTy));
2761 return EmitLoadOfReferenceLValue(RefLVal);
2762 }
2763
2764 /// Load a pointer with type \p PtrTy stored at address \p Ptr.
2765 /// Note that \p PtrTy is the type of the loaded pointer, not the address
2766 /// it is loaded from.
2768 LValueBaseInfo *BaseInfo = nullptr,
2769 TBAAAccessInfo *TBAAInfo = nullptr);
2771
2772private:
2773 struct AllocaTracker {
2774 void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
2775 llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }
2776
2777 private:
2779 };
2780 AllocaTracker *Allocas = nullptr;
2781
2782public:
2783 // Captures all the allocas created during the scope of its RAII object.
2786 : CGF(CGF), OldTracker(CGF.Allocas) {
2787 CGF.Allocas = &Tracker;
2788 }
2789 ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }
2790
2791 llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }
2792
2793 private:
2794 CodeGenFunction &CGF;
2795 AllocaTracker *OldTracker;
2796 AllocaTracker Tracker;
2797 };
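// A minimal usage sketch of the RAII tracker defined above, written from
// inside CodeGenFunction (its class name is elided in this listing;
// AllocaTrackerRAII is assumed here):
//
//   llvm::SmallVector<llvm::AllocaInst *> Emitted;
//   {
//     AllocaTrackerRAII Tracker(*this);   // assumed class name
//     // ... emit code that may create temporaries ...
//     Emitted = Tracker.Take();
//   } // the previous tracker (if any) is restored here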
2798
2799 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2800 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2801 /// insertion point of the builder. The caller is responsible for setting an
2802 /// appropriate alignment on
2803 /// the alloca.
2804 ///
2805 /// \p ArraySize is the number of array elements to be allocated if it
2806 /// is not nullptr.
2807 ///
2808 /// LangAS::Default is the address space of pointers to local variables and
2809 /// temporaries, as exposed in the source language. In certain
2810 /// configurations, this is not the same as the alloca address space, and a
2811 /// cast is needed to lift the pointer from the alloca AS into
2812 /// LangAS::Default. This can happen when the target uses a restricted
2813 /// address space for the stack but the source language requires
2814 /// LangAS::Default to be a generic address space. The latter condition is
2815 /// common for most programming languages; OpenCL is an exception in that
2816 /// LangAS::Default is the private address space, which naturally maps
2817 /// to the stack.
2818 ///
2819 /// Because the address of a temporary is often exposed to the program in
2820 /// various ways, this function will perform the cast. The original alloca
2821 /// instruction is returned through \p Alloca if it is not nullptr.
2822 ///
2823 /// The cast is not performed in CreateTempAllocaWithoutCast. This is
2824 /// more efficient if the caller knows that the address will not be exposed.
2825 llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2826 llvm::Value *ArraySize = nullptr);
2828 const Twine &Name = "tmp",
2829 llvm::Value *ArraySize = nullptr,
2830 RawAddress *Alloca = nullptr);
2832 const Twine &Name = "tmp",
2833 llvm::Value *ArraySize = nullptr);
2834
2835 /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2836 /// default ABI alignment of the given LLVM type.
2837 ///
2838 /// IMPORTANT NOTE: This is *not* generally the right alignment for
2839 /// any given AST type that happens to have been lowered to the
2840 /// given IR type. This should only ever be used for function-local,
2841 /// IR-driven manipulations like saving and restoring a value. Do
2842 /// not hand this address off to arbitrary IRGen routines, and especially
2843 /// do not pass it as an argument to a function that might expect a
2844 /// properly ABI-aligned value.
2846 const Twine &Name = "tmp");
2847
2848 /// CreateIRTemp - Create a temporary IR object of the given type, with
2849 /// appropriate alignment. This routine should only be used when a temporary
2850 /// value needs to be stored into an alloca (for example, to avoid explicit
2851 /// PHI construction), but the type is the IR type, not the type appropriate
2852 /// for storing in memory.
2853 ///
2854 /// That is, this is exactly equivalent to CreateMemTemp, but calling
2855 /// ConvertType instead of ConvertTypeForMem.
2856 RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
2857
2858 /// CreateMemTemp - Create a temporary memory object of the given type, with
2859 /// appropriate alignment and cast it to the default address space. Returns
2860 /// the original alloca instruction by \p Alloca if it is not nullptr.
2861 RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
2862 RawAddress *Alloca = nullptr);
2864 const Twine &Name = "tmp",
2865 RawAddress *Alloca = nullptr);
2866
2867 /// CreateMemTemp - Create a temporary memory object of the given type, with
2868 /// appropriate alignment without casting it to the default address space.
2869 RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2871 const Twine &Name = "tmp");
2872
2873 /// CreateAggTemp - Create a temporary memory object for the given
2874 /// aggregate type.
2875 AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
2876 RawAddress *Alloca = nullptr) {
2877 return AggValueSlot::forAddr(
2878 CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
2881 }
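// A short sketch contrasting the two kinds of temporaries documented above
// (Ty is a hypothetical QualType):
//
//   RawAddress IRTmp  = CreateIRTemp(Ty, "tmp.ir");   // uses ConvertType
//   RawAddress MemTmp = CreateMemTemp(Ty, "tmp.mem"); // uses ConvertTypeForMem
//   // MemTmp is the one suitable for ordinary loads/stores of Ty values;
//   // IRTmp only stashes an already-converted IR value (e.g. to avoid a PHI).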
2882
2883 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2884 /// expression and compare the result against zero, returning an Int1Ty value.
2885 llvm::Value *EvaluateExprAsBool(const Expr *E);
2886
2887 /// Retrieve the implicit cast expression of the rhs in a binary operator
2888 /// expression by passing pointers to Value and QualType.
2889 /// This is used for implicit bitfield conversion checks, which
2890 /// must compare with the value before potential truncation.
2892 llvm::Value **Previous,
2893 QualType *SrcType);
2894
2895 /// Emit a check for an [implicit] conversion of a bitfield. It is not UB,
2896 /// so we use the value after conversion.
2897 void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
2898 llvm::Value *Dst, QualType DstType,
2899 const CGBitFieldInfo &Info,
2901
2902 /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2903 void EmitIgnoredExpr(const Expr *E);
2904
2905 /// EmitAnyExpr - Emit code to compute the specified expression which can have
2906 /// any type. The result is returned as an RValue struct. If this is an
2907 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2908 /// the result should be returned.
2909 ///
2910 /// \param ignoreResult True if the resulting value isn't used.
2913 bool ignoreResult = false);
2914
2915 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2916 // or the value of the expression, depending on how va_list is defined.
2918
2919 /// Emit a "reference" to a __builtin_ms_va_list; this is
2920 /// always the value of the expression, because a __builtin_ms_va_list is a
2921 /// pointer to a char.
2923
2924 /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
2925 /// always be accessible even if no aggregate location is provided.
2927
2928 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2929 /// arbitrary expression into the given memory location.
2930 void EmitAnyExprToMem(const Expr *E, Address Location,
2931 Qualifiers Quals, bool IsInitializer);
2932
2933 void EmitAnyExprToExn(const Expr *E, Address Addr);
2934
2935 /// EmitInitializationToLValue - Emit an initializer to an LValue.
2937 const Expr *E, LValue LV,
2939
2940 /// EmitExprAsInit - Emits the code necessary to initialize a
2941 /// location in memory with the given initializer.
2942 void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2943 bool capturedByInit);
2944
2945 /// hasVolatileMember - returns true if the aggregate type has a volatile
2946 /// member.
2948 if (const RecordType *RT = T->getAs<RecordType>()) {
2949 const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
2950 return RD->hasVolatileMember();
2951 }
2952 return false;
2953 }
2954
2955 /// Determine whether a return value slot may overlap some other object.
2957 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2958 // class subobjects. These cases may need to be revisited depending on the
2959 // resolution of the relevant core issue.
2961 }
2962
2963 /// Determine whether a field initialization may overlap some other object.
2965
2966 /// Determine whether a base class initialization may overlap some other
2967 /// object.
2969 const CXXRecordDecl *BaseRD,
2970 bool IsVirtual);
2971
2972 /// Emit an aggregate assignment.
2974 bool IsVolatile = hasVolatileMember(EltTy);
2975 EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
2976 }
2977
2979 AggValueSlot::Overlap_t MayOverlap) {
2980 EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
2981 }
2982
2983 /// EmitAggregateCopy - Emit an aggregate copy.
2984 ///
2985 /// \param isVolatile \c true iff either the source or the destination is
2986 /// volatile.
2987 /// \param MayOverlap Whether the tail padding of the destination might be
2988 /// occupied by some other object. More efficient code can often be
2989 /// generated if not.
2991 AggValueSlot::Overlap_t MayOverlap,
2992 bool isVolatile = false);
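// For illustration, with two aggregate LValues Dst and Src of record type Ty
// (hypothetical values): a plain assignment conservatively assumes the
// destination's tail padding may be occupied, while a fresh initialization
// may state that it is not (DoesNotOverlap is assumed to be the sibling of
// the MayOverlap enumerator used above).
//
//   EmitAggregateAssign(Dst, Src, Ty);
//   EmitAggregateCopy(Dst, Src, Ty, AggValueSlot::DoesNotOverlap);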
2993
2994 /// GetAddrOfLocalVar - Return the address of a local variable.
2996 auto it = LocalDeclMap.find(VD);
2997 assert(it != LocalDeclMap.end() &&
2998 "Invalid argument to GetAddrOfLocalVar(), no decl!");
2999 return it->second;
3000 }
3001
3002 /// Given an opaque value expression, return its LValue mapping if it exists,
3003 /// otherwise create one.
3005
3006 /// Given an opaque value expression, return its RValue mapping if it exists,
3007 /// otherwise create one.
3009
3010 /// Get the index of the current ArrayInitLoopExpr, if any.
3011 llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
3012
3013 /// getAccessedFieldNo - Given an encoded value and a result number, return
3014 /// the input field number being accessed.
3015 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
3016
3017 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
3018 llvm::BasicBlock *GetIndirectGotoBlock();
3019
3020 /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
3021 static bool IsWrappedCXXThis(const Expr *E);
3022
3023 /// EmitNullInitialization - Generate code to set a value of the given type to
3024 /// null. If the type contains data member pointers, they will be initialized
3025 /// to -1 in accordance with the Itanium C++ ABI.
3027
3028 /// Emits a call to an LLVM variable-argument intrinsic, either
3029 /// \c llvm.va_start or \c llvm.va_end.
3030 /// \param ArgValue A reference to the \c va_list as emitted by either
3031 /// \c EmitVAListRef or \c EmitMSVAListRef.
3032 /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
3033 /// calls \c llvm.va_end.
3034 llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
3035
3036 /// Generate code to get an argument from the passed in pointer
3037 /// and update it accordingly.
3038 /// \param VE The \c VAArgExpr for which to generate code.
3039 /// \param VAListAddr Receives a reference to the \c va_list as emitted by
3040 /// either \c EmitVAListRef or \c EmitMSVAListRef.
3041 /// \returns A pointer to the argument.
3042 // FIXME: We should be able to get rid of this method and use the va_arg
3043 // instruction in LLVM instead once it works well enough.
3046
3047 /// emitArrayLength - Compute the length of an array, even if it's a
3048 /// VLA, and drill down to the base element type.
3050 QualType &baseType,
3051 Address &addr);
3052
3053 /// EmitVLASize - Capture all the sizes for the VLA expressions in
3054 /// the given variably-modified type and store them in the VLASizeMap.
3055 ///
3056 /// This function can be called with a null (unreachable) insert point.
3058
3060 llvm::Value *NumElts;
3062
3063 VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
3064 };
3065
3066 /// Return the number of elements for a single dimension
3067 /// for the given array type.
3070
3071 /// Returns an LLVM value that corresponds to the size,
3072 /// in non-variably-sized elements, of a variable length array type,
3073 /// plus that largest non-variably-sized element type. Assumes that
3074 /// the type has already been emitted with EmitVariablyModifiedType.
3077
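// A hedged sketch, assuming EmitVariablyModifiedType has already populated
// VLASizeMap for the variably modified type VlaTy, and that getVLASize is the
// (elided) accessor returning the VlaSizePair described above:
//
//   auto VlaSize = getVLASize(VlaTy->castAs<VariableArrayType>()); // assumed name
//   llvm::Value *NumElts = VlaSize.NumElts; // runtime element count
//   QualType EltTy = VlaSize.Type;          // largest non-variably-sized element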
3078 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
3079 /// generating code for a C++ member function.
3080 llvm::Value *LoadCXXThis() {
3081 assert(CXXThisValue && "no 'this' value for this function");
3082 return CXXThisValue;
3083 }
3085
3086 /// LoadCXXVTT - Load the VTT parameter for base constructors/destructors
3087 /// that have virtual bases.
3088 // FIXME: Every place that calls LoadCXXVTT is something
3089 // that needs to be abstracted properly.
3090 llvm::Value *LoadCXXVTT() {
3091 assert(CXXStructorImplicitParamValue && "no VTT value for this function");
3092 return CXXStructorImplicitParamValue;
3093 }
3094
3095 /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
3096 /// complete class to the given direct base.
3097 Address
3099 const CXXRecordDecl *Derived,
3100 const CXXRecordDecl *Base,
3101 bool BaseIsVirtual);
3102
3103 static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
3104
3105 /// GetAddressOfBaseClass - This function will add the necessary delta to the
3106 /// load of 'this' and returns address of the base class.
3108 const CXXRecordDecl *Derived,
3111 bool NullCheckValue, SourceLocation Loc);
3112
3114 const CXXRecordDecl *Derived,
3117 bool NullCheckValue);
3118
3119 /// GetVTTParameter - Return the VTT parameter that should be passed to a
3120 /// base constructor/destructor with virtual bases.
3121 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
3122 /// to ItaniumCXXABI.cpp together with all the references to VTT.
3123 llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
3124 bool Delegating);
3125
3127 CXXCtorType CtorType,
3128 const FunctionArgList &Args,
3130 // It's important not to confuse this and the previous function. Delegating
3131 // constructors are a C++11 feature. The constructor delegate optimization
3132 // is used to reduce duplication in the base and complete constructors where
3133 // they are substantially the same.
3135 const FunctionArgList &Args);
3136
3137 /// Emit a call to an inheriting constructor (that is, one that invokes a
3138 /// constructor inherited from a base class) by inlining its definition. This
3139 /// is necessary if the ABI does not support forwarding the arguments to the
3140 /// base class constructor (because they're variadic or similar).
3142 CXXCtorType CtorType,
3143 bool ForVirtualBase,
3144 bool Delegating,
3145 CallArgList &Args);
3146
3147 /// Emit a call to a constructor inherited from a base class, passing the
3148 /// current constructor's arguments along unmodified (without even making
3149 /// a copy).
3151 bool ForVirtualBase, Address This,
3152 bool InheritedFromVBase,
3154
3156 bool ForVirtualBase, bool Delegating,
3157 AggValueSlot ThisAVS, const CXXConstructExpr *E);
3158
3160 bool ForVirtualBase, bool Delegating,
3161 Address This, CallArgList &Args,
3163 SourceLocation Loc, bool NewPointerIsChecked,
3164 llvm::CallBase **CallOrInvoke = nullptr);
3165
3166 /// Emit assumption load for all bases. Must be called only on the
3167 /// most-derived class and not while the object is under construction.
3169
3170 /// Emit assumption that vptr load == global vtable.
3171 void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
3172
3174 Address This, Address Src,
3175 const CXXConstructExpr *E);
3176
3178 const ArrayType *ArrayTy,
3179 Address ArrayPtr,
3180 const CXXConstructExpr *E,
3181 bool NewPointerIsChecked,
3182 bool ZeroInitialization = false);
3183
3185 llvm::Value *NumElements,
3186 Address ArrayPtr,
3187 const CXXConstructExpr *E,
3188 bool NewPointerIsChecked,
3189 bool ZeroInitialization = false);
3190
3192
3194 bool ForVirtualBase, bool Delegating, Address This,
3195 QualType ThisTy);
3196
3198 llvm::Type *ElementTy, Address NewPtr,
3199 llvm::Value *NumElements,
3200 llvm::Value *AllocSizeWithoutCookie);
3201
3202 void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
3203 Address Ptr);
3204
3209
3210 llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
3211 void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
3212
3213 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
3215
3216 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
3217 QualType DeleteTy, llvm::Value *NumElements = nullptr,
3218 CharUnits CookieSize = CharUnits());
3219
3221 const CallExpr *TheCallExpr, bool IsDelete);
3222
3223 llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
3224 llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
3226
3227 /// Situations in which we might emit a check for the suitability of a
3228 /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
3229 /// compiler-rt.
3231 /// Checking the operand of a load. Must be suitably sized and aligned.
3233 /// Checking the destination of a store. Must be suitably sized and aligned.
3235 /// Checking the bound value in a reference binding. Must be suitably sized
3236 /// and aligned, but is not required to refer to an object (until the
3237 /// reference is used), per core issue 453.
3239 /// Checking the object expression in a non-static data member access. Must
3240 /// be an object within its lifetime.
3242 /// Checking the 'this' pointer for a call to a non-static member function.
3243 /// Must be an object within its lifetime.
3245 /// Checking the 'this' pointer for a constructor call.
3247 /// Checking the operand of a static_cast to a derived pointer type. Must be
3248 /// null or an object within its lifetime.
3250 /// Checking the operand of a static_cast to a derived reference type. Must
3251 /// be an object within its lifetime.
3253 /// Checking the operand of a cast to a base object. Must be suitably sized
3254 /// and aligned.
3256 /// Checking the operand of a cast to a virtual base object. Must be an
3257 /// object within its lifetime.
3259 /// Checking the value assigned to a _Nonnull pointer. Must not be null.
3261 /// Checking the operand of a dynamic_cast or a typeid expression. Must be
3262 /// null or an object within its lifetime.
3265
3266 /// Determine whether the pointer type check \p TCK permits null pointers.
3268
3269 /// Determine whether the pointer type check \p TCK requires a vptr check.
3271
3272 /// Whether any type-checking sanitizers are enabled. If \c false,
3273 /// calls to EmitTypeCheck can be skipped.
3275
3277 QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
3278 llvm::Value *ArraySize = nullptr) {
3280 return;
3281 EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
3282 SkippedChecks, ArraySize);
3283 }
3284
3286 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3287 SanitizerSet SkippedChecks = SanitizerSet(),
3288 llvm::Value *ArraySize = nullptr) {
3290 return;
3291 EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
3292 SkippedChecks, ArraySize);
3293 }
3294
3295 /// Emit a check that \p V is the address of storage of the
3296 /// appropriate size and alignment for an object of type \p Type
3297 /// (or if ArraySize is provided, for an array of that bound).
3299 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3300 SanitizerSet SkippedChecks = SanitizerSet(),
3301 llvm::Value *ArraySize = nullptr);
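// A hedged example call, assuming an Address Addr about to be loaded from,
// its pointee type Ty, and a SourceLocation Loc (TCK_Load is assumed to be
// the enumerator documented above as "Checking the operand of a load"):
//
//   EmitTypeCheck(TCK_Load, Loc, Addr, Ty);
//   // Folds to nothing unless the relevant -fsanitize= checks are enabled.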
3302
3303 /// Emit a check that \p Base points into an array object, which
3304 /// we can access at index \p Index. \p Accessed should be \c false if this
3305 /// expression is used as an lvalue, for instance in "&Arr[Idx]".
3306 void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
3307 QualType IndexType, bool Accessed);
3308 void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
3309 llvm::Value *Index, QualType IndexType,
3310 QualType IndexedType, bool Accessed);
3311
3312 // Find a struct's flexible array member and get its offset. It may be
3313 // embedded inside multiple sub-structs, but must still be the last field.
3314 const FieldDecl *
3316 const FieldDecl *FAMDecl,
3317 uint64_t &Offset);
3318
3320 const FieldDecl *FAMDecl,
3321 const FieldDecl *CountDecl);
3322
3323 /// Build an expression accessing the "counted_by" field.
3325 const FieldDecl *FAMDecl,
3326 const FieldDecl *CountDecl);
3327
3329 bool isInc, bool isPre);
3331 bool isInc, bool isPre);
3332
3333 /// Converts Location to a DebugLoc, if debug information is enabled.
3334 llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
3335
3336 /// Get the record field index as represented in debug info.
3337 unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);
3338
3339
3340 //===--------------------------------------------------------------------===//
3341 // Declaration Emission
3342 //===--------------------------------------------------------------------===//
3343
3344 /// EmitDecl - Emit a declaration.
3345 ///
3346 /// This function can be called with a null (unreachable) insert point.
3347 void EmitDecl(const Decl &D);
3348
3349 /// EmitVarDecl - Emit a local variable declaration.
3350 ///
3351 /// This function can be called with a null (unreachable) insert point.
3352 void EmitVarDecl(const VarDecl &D);
3353
3354 void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
3355 bool capturedByInit);
3356
3358 llvm::Value *Address);
3359
3360 /// Determine whether the given initializer is trivial in the sense
3361 /// that it requires no code to be generated.
3363
3364 /// EmitAutoVarDecl - Emit an auto variable declaration.
3365 ///
3366 /// This function can be called with a null (unreachable) insert point.
3368
3370 friend class CodeGenFunction;
3371
3372 const VarDecl *Variable;
3373
3374 /// The address of the alloca for languages with explicit address space
3375 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
3376 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
3377 /// as a global constant.
3378 Address Addr;
3379
3380 llvm::Value *NRVOFlag;
3381
3382 /// True if the variable is a __block variable that is captured by an
3383 /// escaping block.
3384 bool IsEscapingByRef;
3385
3386 /// True if the variable is of aggregate type and has a constant
3387 /// initializer.
3388 bool IsConstantAggregate;
3389
3390 /// Non-null if we should use lifetime annotations.
3391 llvm::Value *SizeForLifetimeMarkers;
3392
3393 /// Address with original alloca instruction. Invalid if the variable was
3394 /// emitted as a global constant.
3395 RawAddress AllocaAddr;
3396
3397 struct Invalid {};
3398 AutoVarEmission(Invalid)
3399 : Variable(nullptr), Addr(Address::invalid()),
3400 AllocaAddr(RawAddress::invalid()) {}
3401
3402 AutoVarEmission(const VarDecl &variable)
3403 : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
3404 IsEscapingByRef(false), IsConstantAggregate(false),
3405 SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
3406
3407 bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
3408
3409 public:
3410 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
3411
3412 bool useLifetimeMarkers() const {
3413 return SizeForLifetimeMarkers != nullptr;
3414 }
3415 llvm::Value *getSizeForLifetimeMarkers() const {
3416 assert(useLifetimeMarkers());
3417 return SizeForLifetimeMarkers;
3418 }
3419
3420 /// Returns the raw, allocated address, which is not necessarily
3421 /// the address of the object itself. It is cast to the default
3422 /// address space for address-space-agnostic languages.
3424 return Addr;
3425 }
3426
3427 /// Returns the address for the original alloca instruction.
3428 RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
3429
3430 /// Returns the address of the object within this declaration.
3431 /// Note that this does not chase the forwarding pointer for
3432 /// __block decls.
3434 if (!IsEscapingByRef) return Addr;
3435
3436 return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
3437 }
3438 };
3440 void EmitAutoVarInit(const AutoVarEmission &emission);
3443 QualType::DestructionKind dtorKind);
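/// A minimal usage sketch (illustrative only; it assumes the usual three-phase
/// emission performed for a local variable, and D is a placeholder VarDecl):
/// \code
///   AutoVarEmission emission = EmitAutoVarAlloca(D);
///   EmitAutoVarInit(emission);
///   EmitAutoVarCleanups(emission);
/// \endcode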
3444
3445 /// Emits the alloca and debug information for the size expressions for each
3446 /// dimension of an array. It registers the association between each
3447 /// (1-dimensional) QualType and its size expression's debug node, so that
3448 /// CGDebugInfo can reference that node when creating the DISubrange object
3449 /// that describes the array type.
3451 const VarDecl &D,
3452 bool EmitDebugInfo);
3453
3455 llvm::GlobalValue::LinkageTypes Linkage);
3456
3458 union {
3460 llvm::Value *Value;
3461 };
3462
3463 bool IsIndirect;
3464
3465 ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
3466 ParamValue(Address A) : Addr(A), IsIndirect(true) {}
3467
3468 public:
3469 static ParamValue forDirect(llvm::Value *value) {
3470 return ParamValue(value);
3471 }
3473 assert(!addr.getAlignment().isZero());
3474 return ParamValue(addr);
3475 }
3476
3477 bool isIndirect() const { return IsIndirect; }
3478 llvm::Value *getAnyValue() const {
3479 if (!isIndirect())
3480 return Value;
3481 assert(!Addr.hasOffset() && "unexpected offset");
3482 return Addr.getBasePointer();
3483 }
3484
3485 llvm::Value *getDirectValue() const {
3486 assert(!isIndirect());
3487 return Value;
3488 }
3489
3491 assert(isIndirect());
3492 return Addr;
3493 }
3494 };
3495
3496 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
3497 void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
3498
3499 /// protectFromPeepholes - Protect a value that we're intending to
3500 /// store to the side, but which will probably be used later, from
3501 /// aggressive peepholing optimizations that might delete it.
3502 ///
3503 /// Pass the result to unprotectFromPeepholes to declare that
3504 /// protection is no longer required.
3505 ///
3506 /// There's no particular reason why this shouldn't apply to
3507 /// l-values, it's just that no existing peepholes work on pointers.
3510
3511 void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
3513 SourceLocation AssumptionLoc,
3514 llvm::Value *Alignment,
3515 llvm::Value *OffsetValue,
3516 llvm::Value *TheCheck,
3517 llvm::Instruction *Assumption);
3518
3519 void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
3520 SourceLocation Loc, SourceLocation AssumptionLoc,
3521 llvm::Value *Alignment,
3522 llvm::Value *OffsetValue = nullptr);
3523
3524 void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
3525 SourceLocation AssumptionLoc,
3526 llvm::Value *Alignment,
3527 llvm::Value *OffsetValue = nullptr);
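/// A minimal sketch of emitting an alignment assumption, e.g. when lowering
/// __builtin_assume_aligned (illustrative only; PtrValue, E and the 16-byte
/// alignment are placeholders):
/// \code
///   emitAlignmentAssumption(PtrValue, E, E->getExprLoc(),
///                           llvm::ConstantInt::get(SizeTy, 16));
/// \endcode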
3528
3529 //===--------------------------------------------------------------------===//
3530 // Statement Emission
3531 //===--------------------------------------------------------------------===//
3532
3533 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
3534 void EmitStopPoint(const Stmt *S);
3535
3536 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
3537 /// this function even if there is no current insertion point.
3538 ///
3539 /// This function may clear the current insertion point; callers should use
3540 /// EnsureInsertPoint if they wish to subsequently generate code without first
3541 /// calling EmitBlock, EmitBranch, or EmitStmt.
3542 void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = {});
3543
3544 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
3545 /// necessarily require an insertion point or debug information; typically
3546 /// because the statement amounts to a jump or a container of other
3547 /// statements.
3548 ///
3549 /// \return True if the statement was handled.
3551
3552 Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
3555 bool GetLast = false,
3556 AggValueSlot AVS =
3558
3559 /// EmitLabel - Emit the block for the given label. It is legal to call this
3560 /// function even if there is no current insertion point.
3561 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
3562
3563 void EmitLabelStmt(const LabelStmt &S);
3565 void EmitGotoStmt(const GotoStmt &S);
3567 void EmitIfStmt(const IfStmt &S);
3568
3570 void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = {});
3571 void EmitForStmt(const ForStmt &S, ArrayRef<const Attr *> Attrs = {});
3573 void EmitDeclStmt(const DeclStmt &S);
3574 void EmitBreakStmt(const BreakStmt &S);
3580 void EmitAsmStmt(const AsmStmt &S);
3581
3587
3592 bool ignoreResult = false);
3596 bool ignoreResult = false);
3598 RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
3599
3600 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3601 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3602
3608 void VolatilizeTryBlocks(llvm::BasicBlock *BB,
3610
3612 llvm::Function *FinallyFunc);
3614 const Stmt *OutlinedStmt);
3615
3617 const SEHExceptStmt &Except);
3618
3620 const SEHFinallyStmt &Finally);
3621
3623 llvm::Value *ParentFP,
3624 llvm::Value *EntryEBP);
3625 llvm::Value *EmitSEHExceptionCode();
3626 llvm::Value *EmitSEHExceptionInfo();
3628
3629 /// Emit simple code for OpenMP directives in Simd-only mode.
3631
3632 /// Scan the outlined statement for captures from the parent function. For
3633 /// each capture, mark the capture as escaped and emit a call to
3634 /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
3636 bool IsFilter);
3637
3638 /// Recovers the address of a local in a parent function. ParentVar is the
3639 /// address of the variable used in the immediate parent function. It can
3640 /// either be an alloca or a call to llvm.localrecover if there are nested
3641 /// outlined functions. ParentFP is the frame pointer of the outermost parent
3642 /// frame.
3644 Address ParentVar,
3645 llvm::Value *ParentFP);
3646
3648 ArrayRef<const Attr *> Attrs = {});
3649
3650 /// Controls insertion of cancellation exit blocks in worksharing constructs.
3652 CodeGenFunction &CGF;
3653
3654 public:
3656 bool HasCancel)
3657 : CGF(CGF) {
3658 CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
3659 }
3660 ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
3661 };
3662
3663 /// Returns the calculated size of the specified type.
3664 llvm::Value *getTypeSize(QualType Ty);
3672 SmallVectorImpl<llvm::Value *> &CapturedVars);
3673 void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
3675 /// Perform element-by-element copying of arrays with type \a
3676 /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
3677 /// generated by \a CopyGen.
3678 ///
3679 /// \param DestAddr Address of the destination array.
3680 /// \param SrcAddr Address of the source array.
3681 /// \param OriginalType Type of destination and source arrays.
3682 /// \param CopyGen Copying procedure that copies value of single array element
3683 /// to another single array element.
3685 Address DestAddr, Address SrcAddr, QualType OriginalType,
3686 const llvm::function_ref<void(Address, Address)> CopyGen);
3687 /// Emit proper copying of data from one variable to another.
3688 ///
3689 /// \param OriginalType Original type of the copied variables.
3690 /// \param DestAddr Destination address.
3691 /// \param SrcAddr Source address.
3692 /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
3693 /// type of the base array element).
3694 /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
3695 /// the base array element).
3696 /// \param Copy Actual copying expression for copying data from \a SrcVD to \a
3697 /// DestVD.
3698 void EmitOMPCopy(QualType OriginalType,
3699 Address DestAddr, Address SrcAddr,
3700 const VarDecl *DestVD, const VarDecl *SrcVD,
3701 const Expr *Copy);
3702 /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
3703 /// \a X = \a E \a BO \a X.
3704 ///
3705 /// \param X Value to be updated.
3706 /// \param E Update value.
3707 /// \param BO Binary operation for update operation.
3708 /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3709 /// expression, false otherwise.
3710 /// \param AO Atomic ordering of the generated atomic instructions.
3711 /// \param CommonGen Code generator for complex expressions that cannot be
3712 /// expressed through atomicrmw instruction.
3713 /// \returns <true, OldAtomicValue> if a simple 'atomicrmw' instruction was
3714 /// generated, <false, RValue::get(nullptr)> otherwise.
3715 std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3716 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3717 llvm::AtomicOrdering AO, SourceLocation Loc,
3718 const llvm::function_ref<RValue(RValue)> CommonGen);
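/// For example (illustrative source-level sketch), an update such as
/// \code
/// #pragma omp atomic update
/// x = x + expr;
/// \endcode
/// is a candidate for a single 'atomicrmw'; otherwise \p CommonGen is used to
/// emit a fallback sequence.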
3720 OMPPrivateScope &PrivateScope);
3722 OMPPrivateScope &PrivateScope);
3724 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
3725 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3726 CaptureDeviceAddrMap);
3728 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
3729 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3730 CaptureDeviceAddrMap);
3731 /// Emit code for the 'copyin' clause in directive \a D. The following code is
3732 /// generated at the start of the outlined functions for these directives:
3733 /// \code
3734 /// threadprivate_var1 = master_threadprivate_var1;
3735 /// operator=(threadprivate_var2, master_threadprivate_var2);
3736 /// ...
3737 /// __kmpc_barrier(&loc, global_tid);
3738 /// \endcode
3739 ///
3740 /// \param D OpenMP directive possibly with 'copyin' clause(s).
3741 /// \returns true if at least one copyin variable is found, false otherwise.
3743 /// Emit initial code for lastprivate variables. If some variable is
3744 /// not also firstprivate, then the default initialization is used. Otherwise
3745 /// initialization of this variable is performed by EmitOMPFirstprivateClause
3746 /// method.
3747 ///
3748 /// \param D Directive that may have 'lastprivate' clauses.
3749 /// \param PrivateScope Private scope for capturing lastprivate variables for
3750 /// proper codegen in internal captured statement.
3751 ///
3752 /// \returns true if there is at least one lastprivate variable, false
3753 /// otherwise.
3755 OMPPrivateScope &PrivateScope);
3756 /// Emit final copying of lastprivate values to original variables at
3757 /// the end of the worksharing or simd directive.
3758 ///
3759 /// \param D Directive that has at least one 'lastprivate' clause.
3760 /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3761 /// it is the last iteration of the loop code in the associated directive, or to
3762 /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3764 bool NoFinals,
3765 llvm::Value *IsLastIterCond = nullptr);
3766 /// Emit initial code for linear clauses.
3768 CodeGenFunction::OMPPrivateScope &PrivateScope);
3769 /// Emit final code for linear clauses.
3770 /// \param CondGen Optional conditional code for final part of codegen for
3771 /// linear clause.
3773 const OMPLoopDirective &D,
3774 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3775 /// Emit initial code for reduction variables. Creates reduction copies
3776 /// and initializes them with the values according to OpenMP standard.
3777 ///
3778 /// \param D Directive (possibly) with the 'reduction' clause.
3779 /// \param PrivateScope Private scope for capturing reduction variables for
3780 /// proper codegen in internal captured statement.
3781 ///
3783 OMPPrivateScope &PrivateScope,
3784 bool ForInscan = false);
3785 /// Emit final update of reduction values to original variables at
3786 /// the end of the directive.
3787 ///
3788 /// \param D Directive that has at least one 'reduction' clause.
3789 /// \param ReductionKind The kind of reduction to perform.
3791 const OpenMPDirectiveKind ReductionKind);
3792 /// Emit initial code for linear variables. Creates private copies
3793 /// and initializes them with the values according to OpenMP standard.
3794 ///
3795 /// \param D Directive (possibly) with the 'linear' clause.
3796 /// \return true if at least one linear variable is found that should be
3797 /// initialized with the value of the original variable, false otherwise.
3799
3800 typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3801 llvm::Function * /*OutlinedFn*/,
3802 const OMPTaskDataTy & /*Data*/)>
3805 const OpenMPDirectiveKind CapturedRegion,
3806 const RegionCodeGenTy &BodyGen,
3807 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3814 explicit OMPTargetDataInfo() = default;
3817 unsigned NumberOfTargetItems)
3821 };
3823 const RegionCodeGenTy &BodyGen,
3824 OMPTargetDataInfo &InputInfo);
3827 CodeGenFunction &CGF,
3828 const CapturedStmt *CS,
3867 void
3870 void
3878 void
3880 void
3898 void
3923
3924 /// Emit device code for the target directive.
3926 StringRef ParentName,
3927 const OMPTargetDirective &S);
3928 static void
3931 /// Emit device code for the target parallel for directive.
3933 CodeGenModule &CGM, StringRef ParentName,
3935 /// Emit device code for the target parallel for simd directive.
3937 CodeGenModule &CGM, StringRef ParentName,
3939 /// Emit device code for the target teams directive.
3940 static void
3942 const OMPTargetTeamsDirective &S);
3943 /// Emit device code for the target teams distribute directive.
3945 CodeGenModule &CGM, StringRef ParentName,
3947 /// Emit device code for the target teams distribute simd directive.
3949 CodeGenModule &CGM, StringRef ParentName,
3951 /// Emit device code for the target simd directive.
3953 StringRef ParentName,
3954 const OMPTargetSimdDirective &S);
3955 /// Emit device code for the target teams distribute parallel for simd
3956 /// directive.
3958 CodeGenModule &CGM, StringRef ParentName,
3960
3961 /// Emit device code for the target teams loop directive.
3963 CodeGenModule &CGM, StringRef ParentName,
3965
3966 /// Emit device code for the target parallel loop directive.
3968 CodeGenModule &CGM, StringRef ParentName,
3970
3972 CodeGenModule &CGM, StringRef ParentName,
3974
3975 /// Emit the Stmt \p S and return its topmost canonical loop, if any.
3976 /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
3977 /// future it is meant to be the number of loops expected in the loop nests
3978 /// (usually specified by the "collapse" clause) that are collapsed to a
3979 /// single loop by this function.
3980 llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
3981 int Depth);
3982
3983 /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
3985
3986 /// Emit inner loop of the worksharing/simd construct.
3987 ///
3988 /// \param S Directive, for which the inner loop must be emitted.
3989 /// \param RequiresCleanup true, if directive has some associated private
3990 /// variables.
3991 /// \param LoopCond Boolean condition for loop continuation.
3992 /// \param IncExpr Increment expression for loop control variable.
3993 /// \param BodyGen Generator for the inner body of the inner loop.
3994 /// \param PostIncGen Generator for post-increment code (required for ordered
3995 /// loop directives).
3997 const OMPExecutableDirective &S, bool RequiresCleanup,
3998 const Expr *LoopCond, const Expr *IncExpr,
3999 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
4000 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
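/// A minimal call-site sketch (illustrative only; S is a placeholder loop
/// directive and LoopScope a placeholder private scope):
/// \code
///   EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
///                    [&](CodeGenFunction &CGF) { CGF.EmitStmt(S.getBody()); },
///                    [](CodeGenFunction &) {});
/// \endcode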
4001
4003 /// Emit initial code for loop counters of loop-based directives.
4005 OMPPrivateScope &LoopScope);
4006
4007 /// Helper for the OpenMP loop directives.
4009
4010 /// Emit code for the worksharing loop-based directive.
4011 /// \return true if this construct has any lastprivate clause, false
4012 /// otherwise.
4014 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
4015 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4016
4017 /// Emit code for the distribute loop-based directive.
4019 const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
4020
4021 /// Helpers for the OpenMP loop directives.
4024 const OMPLoopDirective &D,
4025 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
4026
4027 /// Emits the lvalue for the expression with possibly captured variable.
4029
4030private:
4031 /// Helpers for blocks.
4032 llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
4033
4034 /// Struct with the values to be passed to the OpenMP loop-related functions.
4035 struct OMPLoopArguments {
4036 /// loop lower bound
4038 /// loop upper bound
4040 /// loop stride
4042 /// isLastIteration argument for runtime functions
4044 /// Chunk value generated by sema
4045 llvm::Value *Chunk = nullptr;
4046 /// EnsureUpperBound
4047 Expr *EUB = nullptr;
4048 /// IncrementExpression
4049 Expr *IncExpr = nullptr;
4050 /// Loop initialization
4051 Expr *Init = nullptr;
4052 /// Loop exit condition
4053 Expr *Cond = nullptr;
4054 /// Update of LB after a whole chunk has been executed
4055 Expr *NextLB = nullptr;
4056 /// Update of UB after a whole chunk has been executed
4057 Expr *NextUB = nullptr;
4058 /// Distinguishes between the 'for', 'distribute', and 'sections' directives
4059 OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
4060 OMPLoopArguments() = default;
4061 OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
4062 llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
4063 Expr *IncExpr = nullptr, Expr *Init = nullptr,
4064 Expr *Cond = nullptr, Expr *NextLB = nullptr,
4065 Expr *NextUB = nullptr)
4066 : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
4067 IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
4068 NextUB(NextUB) {}
4069 };
4070 void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
4071 const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
4072 const OMPLoopArguments &LoopArgs,
4073 const CodeGenLoopTy &CodeGenLoop,
4074 const CodeGenOrderedTy &CodeGenOrdered);
4075 void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
4076 bool IsMonotonic, const OMPLoopDirective &S,
4077 OMPPrivateScope &LoopScope, bool Ordered,
4078 const OMPLoopArguments &LoopArgs,
4079 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4080 void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
4081 const OMPLoopDirective &S,
4082 OMPPrivateScope &LoopScope,
4083 const OMPLoopArguments &LoopArgs,
4084 const CodeGenLoopTy &CodeGenLoopContent);
4085 /// Emit code for the sections directive.
4086 void EmitSections(const OMPExecutableDirective &S);
4087
4088public:
4089 //===--------------------------------------------------------------------===//
4090 // OpenACC Emission
4091 //===--------------------------------------------------------------------===//
4093 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4094 // simply emitting its structured block, but in the future we will implement
4095 // some sort of IR.
4096 EmitStmt(S.getStructuredBlock());
4097 }
4098
4100 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4101 // simply emitting its loop, but in the future we will implement
4102 // some sort of IR.
4103 EmitStmt(S.getLoop());
4104 }
4105
4107 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4108 // simply emitting its loop, but in the future we will implement
4109 // some sort of IR.
4110 EmitStmt(S.getLoop());
4111 }
4112
4114 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4115 // simply emitting its structured block, but in the future we will implement
4116 // some sort of IR.
4117 EmitStmt(S.getStructuredBlock());
4118 }
4119
4121 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4122 // but in the future we will implement some sort of IR.
4123 }
4124
4126 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4127 // but in the future we will implement some sort of IR.
4128 }
4129
4131 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4132 // simply emitting its structured block, but in the future we will implement
4133 // some sort of IR.
4134 EmitStmt(S.getStructuredBlock());
4135 }
4136
4138 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4139 // but in the future we will implement some sort of IR.
4140 }
4141
4143 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4144 // but in the future we will implement some sort of IR.
4145 }
4146
4148 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4149 // but in the future we will implement some sort of IR.
4150 }
4151
4153 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4154 // but in the future we will implement some sort of IR.
4155 }
4156
4158 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4159 // but in the future we will implement some sort of IR.
4160 }
4161
4162 //===--------------------------------------------------------------------===//
4163 // LValue Expression Emission
4164 //===--------------------------------------------------------------------===//
4165
4166 /// Create a check that a scalar RValue is non-null.
4168
4169 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
4171
4172 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
4173 /// and issue an ErrorUnsupported style diagnostic (using the
4174 /// provided Name).
4176 const char *Name);
4177
4178 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
4179 /// an ErrorUnsupported style diagnostic (using the provided Name).
4181 const char *Name);
4182
4183 /// EmitLValue - Emit code to compute a designator that specifies the location
4184 /// of the expression.
4185 ///
4186 /// This can return one of two things: a simple address or a bitfield
4187 /// reference. In either case, the LLVM Value* in the LValue structure is
4188 /// guaranteed to be an LLVM pointer type.
4189 ///
4190 /// If this returns a bitfield reference, nothing about the pointee type of
4191 /// the LLVM value is known: For example, it may not be a pointer to an
4192 /// integer.
4193 ///
4194 /// If this returns a normal address, and if the lvalue's C type is fixed
4195 /// size, this method guarantees that the returned pointer type will point to
4196 /// an LLVM type of the same size as the lvalue's type. If the lvalue has a
4197 /// variable length type, this is not possible.
4198 ///
4200 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
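/// A typical pattern pairs EmitLValue with a subsequent load (sketch;
/// E is a placeholder expression):
/// \code
///   LValue LV = EmitLValue(E);
///   RValue RV = EmitLoadOfLValue(LV, E->getExprLoc());
/// \endcode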
4201
4202private:
4203 LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);
4204
4205public:
4206 /// Same as EmitLValue but additionally we generate checking code to
4207 /// guard against undefined behavior. This is only suitable when we know
4208 /// that the address will be used to access the object.
4210
4213
4214 void EmitAtomicInit(Expr *E, LValue lvalue);
4215
4217
4220
4222 llvm::AtomicOrdering AO, bool IsVolatile = false,
4224
4225 void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
4226
4227 void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
4228 bool IsVolatile, bool isInit);
4229
4230 std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
4232 llvm::AtomicOrdering Success =
4233 llvm::AtomicOrdering::SequentiallyConsistent,
4234 llvm::AtomicOrdering Failure =
4235 llvm::AtomicOrdering::SequentiallyConsistent,
4236 bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
4237
4238 /// Emit an atomicrmw instruction, applying relevant metadata when
4239 /// applicable.
4240 llvm::AtomicRMWInst *emitAtomicRMWInst(
4241 llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val,
4242 llvm::AtomicOrdering Order = llvm::AtomicOrdering::SequentiallyConsistent,
4243 llvm::SyncScope::ID SSID = llvm::SyncScope::System,
4244 const AtomicExpr *AE = nullptr);
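/// A minimal sketch (illustrative only; Addr is a placeholder address of an
/// i32 object) of a sequentially consistent fetch-and-add of 1:
/// \code
///   llvm::AtomicRMWInst *RMW = emitAtomicRMWInst(llvm::AtomicRMWInst::Add,
///                                                Addr, Builder.getInt32(1));
/// \endcode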
4245
4246 void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
4247 const llvm::function_ref<RValue(RValue)> &UpdateOp,
4248 bool IsVolatile);
4249
4250 /// EmitToMemory - Change a scalar value from its value
4251 /// representation to its in-memory representation.
4252 llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
4253
4254 /// EmitFromMemory - Change a scalar value from its memory
4255 /// representation to its value representation.
4256 llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
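/// For example (sketch), a 'bool' is an i1 as a value but an i8 in memory, so
/// the two conversions are inverses of each other for that type:
/// \code
///   llvm::Value *Mem = EmitToMemory(Builder.getTrue(), getContext().BoolTy);
///   llvm::Value *Val = EmitFromMemory(Mem, getContext().BoolTy);
/// \endcode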
4257
4258 /// Check if the scalar \p Value is within the valid range for the given
4259 /// type \p Ty.
4260 ///
4261 /// Returns true if a check is needed (even if the range is unknown).
4262 bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
4264
4265 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4266 /// care to appropriately convert from the memory representation to
4267 /// the LLVM value representation.
4268 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4271 bool isNontemporal = false) {
4272 return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
4273 CGM.getTBAAAccessInfo(Ty), isNontemporal);
4274 }
4275
4276 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4278 TBAAAccessInfo TBAAInfo,
4279 bool isNontemporal = false);
4280
4281 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4282 /// care to appropriately convert from the memory representation to
4283 /// the LLVM value representation. The l-value must be a simple
4284 /// l-value.
4286
4287 /// EmitStoreOfScalar - Store a scalar value to an address, taking
4288 /// care to appropriately convert from the LLVM value representation to
4289 /// the memory representation.
4290 void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
4291 bool Volatile, QualType Ty,
4293 bool isInit = false, bool isNontemporal = false) {
4294 EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
4295 CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
4296 }
4297
4298 void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
4299 bool Volatile, QualType Ty,
4300 LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
4301 bool isInit = false, bool isNontemporal = false);
4302
4303 /// EmitStoreOfScalar - Store a scalar value to an address, taking
4304 /// care to appropriately convert from the LLVM value representation to
4305 /// the memory representation. The l-value must be a simple
4306 /// l-value. The isInit flag indicates whether this is an initialization.
4307 /// If so, atomic qualifiers are ignored and the store is always non-atomic.
4308 void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
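/// A minimal sketch of copying a scalar through l-values (SrcLV, DstLV and Loc
/// are placeholders):
/// \code
///   llvm::Value *V = EmitLoadOfScalar(SrcLV, Loc);
///   EmitStoreOfScalar(V, DstLV, /*isInit=*/false);
/// \endcode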
4309
4310 /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
4311 /// this method emits the address of the lvalue, then loads the result as an
4312 /// rvalue, returning the rvalue.
4317
4318 /// Like EmitLoadOfLValue but also handles complex and aggregate types.
4321 SourceLocation Loc = {});
4322
4323 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
4324 /// lvalue, where both are guaranteed to have the same type, and that type
4325 /// is 'Ty'.
4326 void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
4329
4330 /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
4331 /// as EmitStoreThroughLValue.
4332 ///
4333 /// \param Result [out] - If non-null, this will be set to a Value* for the
4334 /// bit-field contents after the store, appropriate for use as the result of
4335 /// an assignment to the bit-field.
4337 llvm::Value **Result=nullptr);
4338
4339 /// Emit an l-value for an assignment (simple or compound) of complex type.
4343 llvm::Value *&Result);
4344
4345 // Note: only available for agg return types
4348 // Note: only available for agg return types
4350 llvm::CallBase **CallOrInvoke = nullptr);
4351 // Note: only available for agg return types
4359 bool Accessed = false);
4360 llvm::Value *EmitMatrixIndexExpr(const Expr *E);
4363 bool IsLowerBound = true);
4375
4376 std::pair<LValue, LValue> EmitHLSLOutArgLValues(const HLSLOutArgExpr *E,
4377 QualType Ty);
4379 QualType Ty);
4380
4382
4384
4386 LValueBaseInfo *BaseInfo = nullptr,
4387 TBAAAccessInfo *TBAAInfo = nullptr);
4388
4390 llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
4391 ConstantEmission(llvm::Constant *C, bool isReference)
4392 : ValueAndIsReference(C, isReference) {}
4393 public:
4395 static ConstantEmission forReference(llvm::Constant *C) {
4396 return ConstantEmission(C, true);
4397 }
4398 static ConstantEmission forValue(llvm::Constant *C) {
4399 return ConstantEmission(C, false);
4400 }
4401
4402 explicit operator bool() const {
4403 return ValueAndIsReference.getOpaqueValue() != nullptr;
4404 }
4405
4406 bool isReference() const { return ValueAndIsReference.getInt(); }
4408 assert(isReference());
4409 return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
4410 refExpr->getType());
4411 }
4412
4413 llvm::Constant *getValue() const {
4414 assert(!isReference());
4415 return ValueAndIsReference.getPointer();
4416 }
4417 };
4418
4421 llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
4422
4426
4428 const ObjCIvarDecl *Ivar);
4430 const ObjCIvarDecl *Ivar);
4434 llvm::Value *ThisValue);
4435
4436 /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
4437 /// if the Field is a reference, this will return the address of the reference
4438 /// and not the address of the value stored in the reference.
4440 const FieldDecl* Field);
4441
4443 llvm::Value* Base, const ObjCIvarDecl *Ivar,
4444 unsigned CVRQualifiers);
4445
4450
4457
4458 //===--------------------------------------------------------------------===//
4459 // Scalar Expression Emission
4460 //===--------------------------------------------------------------------===//
4461
4462 /// EmitCall - Generate a call of the given function, expecting the given
4463 /// result type, and using the given argument list which specifies both the
4464 /// LLVM arguments and the types they were derived from.
4465 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4467 llvm::CallBase **CallOrInvoke, bool IsMustTail,
4469 bool IsVirtualFunctionPointerThunk = false);
4470 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4472 llvm::CallBase **CallOrInvoke = nullptr,
4473 bool IsMustTail = false) {
4474 return EmitCall(CallInfo, Callee, ReturnValue, Args, CallOrInvoke,
4475 IsMustTail, SourceLocation());
4476 }
4477 RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
4478 ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr,
4479 llvm::CallBase **CallOrInvoke = nullptr,
4480 CGFunctionInfo const **ResolvedFnInfo = nullptr);
4481
4482 // If a Call or Invoke instruction was emitted for this CallExpr, this method
4483 // writes the pointer to `CallOrInvoke` if it's not null.
4486 llvm::CallBase **CallOrInvoke = nullptr);
4488 llvm::CallBase **CallOrInvoke = nullptr);
4490
4491 void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
4493
4494 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4495 const Twine &name = "");
4496 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4498 const Twine &name = "");
4499 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4500 const Twine &name = "");
4501 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4502 ArrayRef<Address> args,
4503 const Twine &name = "");
4504 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4506 const Twine &name = "");
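/// A minimal sketch of calling a nounwind runtime helper (illustrative only;
/// FnTy, Arg0, Arg1 and the name "__example_runtime_fn" are placeholders, not
/// a real runtime entry point):
/// \code
///   llvm::FunctionCallee Fn =
///       CGM.CreateRuntimeFunction(FnTy, "__example_runtime_fn");
///   EmitNounwindRuntimeCall(Fn, {Arg0, Arg1});
/// \endcode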
4507
4509 getBundlesForFunclet(llvm::Value *Callee);
4510
4511 llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
4513 const Twine &Name = "");
4514 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4516 const Twine &name = "");
4517 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4518 const Twine &name = "");
4519 void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4521
4523 NestedNameSpecifier *Qual,
4524 llvm::Type *Ty);
4525
4528 const CXXRecordDecl *RD);
4529
4531
4532 /// Create the discriminator from the storage address and the entity hash.
4533 llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
4534 llvm::Value *Discriminator);
4536 llvm::Value *StorageAddress,
4537 GlobalDecl SchemaDecl,
4538 QualType SchemaType);
4539
4540 llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &Info,
4541 llvm::Value *Pointer);
4542
4543 llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &Info,
4544 llvm::Value *Pointer);
4545
4547 const CGPointerAuthInfo &CurAuthInfo,
4548 const CGPointerAuthInfo &NewAuthInfo,
4549 bool IsKnownNonNull);
4550 llvm::Value *emitPointerAuthResignCall(llvm::Value *Pointer,
4551 const CGPointerAuthInfo &CurInfo,
4552 const CGPointerAuthInfo &NewInfo);
4553
4555 const CGPointerAuthInfo &Info,
4557
4558 llvm::Value *authPointerToPointerCast(llvm::Value *ResultPtr,
4559 QualType SourceType, QualType DestType);
4561 QualType DestType);
4562
4564
4565 llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
4566 return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer();
4567 }
4568
4569 // Return the copy constructor name with the prefix "__copy_constructor_"
4570 // removed.
4572 CharUnits Alignment,
4573 bool IsVolatile,
4574 ASTContext &Ctx);
4575
4576 // Return the destructor name with the prefix "__destructor_" removed.
4578 CharUnits Alignment,
4579 bool IsVolatile,
4580 ASTContext &Ctx);
4581
4582 // These functions emit calls to the special functions of non-trivial C
4583 // structs.
4591
4593 const CXXMethodDecl *Method, const CGCallee &Callee,
4594 ReturnValueSlot ReturnValue, llvm::Value *This,
4595 llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E,
4596 CallArgList *RtlArgs, llvm::CallBase **CallOrInvoke);
4598 llvm::Value *This, QualType ThisTy,
4599 llvm::Value *ImplicitParam,
4600 QualType ImplicitParamTy, const CallExpr *E,
4601 llvm::CallBase **CallOrInvoke = nullptr);
4604 llvm::CallBase **CallOrInvoke = nullptr);
4606 const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
4607 bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
4608 const Expr *Base, llvm::CallBase **CallOrInvoke);
4609 // Compute the object pointer.
4611 llvm::Value *memberPtr,
4612 const MemberPointerType *memberPtrType,
4613 LValueBaseInfo *BaseInfo = nullptr,
4614 TBAAAccessInfo *TBAAInfo = nullptr);
4617 llvm::CallBase **CallOrInvoke);
4618
4620 const CXXMethodDecl *MD,
4622 llvm::CallBase **CallOrInvoke);
4624
4627 llvm::CallBase **CallOrInvoke);
4628
4631
4632 RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
4634
4635 RValue emitRotate(const CallExpr *E, bool IsRotateRight);
4636
4637 /// Emit IR for __builtin_os_log_format.
4639
4640 /// Emit IR for __builtin_is_aligned.
4642 /// Emit IR for __builtin_align_up/__builtin_align_down.
4643 RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp);
4644
4647 CharUnits BufferAlignment);
4648
4650 llvm::CallBase **CallOrInvoke);
4651
4652 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
4653 /// is unhandled by the current target.
4654 llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4656
4657 llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
4658 const llvm::CmpInst::Predicate Fp,
4659 const llvm::CmpInst::Predicate Ip,
4660 const llvm::Twine &Name = "");
4661 llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4663 llvm::Triple::ArchType Arch);
4664 llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4666 llvm::Triple::ArchType Arch);
4667 llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4669 llvm::Triple::ArchType Arch);
4670 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
4671 QualType RTy);
4672 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
4673 QualType RTy);
4674
4675 llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
4676 unsigned LLVMIntrinsic,
4677 unsigned AltLLVMIntrinsic,
4678 const char *NameHint,
4679 unsigned Modifier,
4680 const CallExpr *E,
4682 Address PtrOp0, Address PtrOp1,
4683 llvm::Triple::ArchType Arch);
4684
4685 llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
4686 unsigned Modifier, llvm::Type *ArgTy,
4687 const CallExpr *E);
4688 llvm::Value *EmitNeonCall(llvm::Function *F,
4690 const char *name,
4691 unsigned shift = 0, bool rightshift = false);
4692 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
4693 const llvm::ElementCount &Count);
4694 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
4695 llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
4696 bool negateForRightShift);
4697 llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
4698 llvm::Type *Ty, bool usgn, const char *name);
4699 llvm::Value *vectorWrapScalar16(llvm::Value *Op);
4700 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
4701 /// access builtin. Only required if it can't be inferred from the base
4702 /// pointer operand.
4703 llvm::Type *SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags);
4704
4706 getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType,
4708 llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
4709 llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
4710 llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
4711 llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
4713 llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
4714 llvm::Type *ReturnType,
4716 llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
4717 llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
4718 llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
4719 llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
4720 llvm::Value *EmitSVEPMull(const SVETypeFlags &TypeFlags,
4722 unsigned BuiltinID);
4723 llvm::Value *EmitSVEMovl(const SVETypeFlags &TypeFlags,
4725 unsigned BuiltinID);
4726 llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
4727 llvm::ScalableVectorType *VTy);
4728 llvm::Value *EmitSVEPredicateTupleCast(llvm::Value *PredTuple,
4729 llvm::StructType *Ty);
4730 llvm::Value *EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
4732 unsigned IntID);
4733 llvm::Value *EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
4735 unsigned IntID);
4736 llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
4738 unsigned BuiltinID, bool IsZExtReturn);
4739 llvm::Value *EmitSVEMaskedStore(const CallExpr *,
4741 unsigned BuiltinID);
4742 llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
4744 unsigned BuiltinID);
4745 llvm::Value *EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
4747 unsigned IntID);
4748 llvm::Value *EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
4750 unsigned IntID);
4751 llvm::Value *EmitSVEStructStore(const SVETypeFlags &TypeFlags,
4753 unsigned IntID);
4754 llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4755
4756 llvm::Value *EmitSMELd1St1(const SVETypeFlags &TypeFlags,
4758 unsigned IntID);
4759 llvm::Value *EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
4761 unsigned IntID);
4762 llvm::Value *EmitSMEZero(const SVETypeFlags &TypeFlags,
4764 unsigned IntID);
4765 llvm::Value *EmitSMELdrStr(const SVETypeFlags &TypeFlags,
4767 unsigned IntID);
4768
4769 void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E,
4771 SVETypeFlags TypeFlags);
4772
4773 llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4774
4775 llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4776 llvm::Triple::ArchType Arch);
4777 llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4778
4780 llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4781 llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4782 llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4783 llvm::Value *EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4785 llvm::Value *EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4786 llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
4787 const CallExpr *E);
4788 llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4789 llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4790 llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
4791 const CallExpr *E);
4792 llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4793 llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4795
4796 llvm::Value *EmitRISCVCpuSupports(const CallExpr *E);
4797 llvm::Value *EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs);
4798 llvm::Value *EmitRISCVCpuInit();
4799 llvm::Value *EmitRISCVCpuIs(const CallExpr *E);
4800 llvm::Value *EmitRISCVCpuIs(StringRef CPUStr);
4801
4802 void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
4803 const CallExpr *E);
4804 void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
4805 llvm::AtomicOrdering &AO,
4806 llvm::SyncScope::ID &SSID);
4807
4808 enum class MSVCIntrin;
4809 llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
4810
4811 llvm::Value *EmitBuiltinAvailable(const VersionTuple &Version);
4812
4815 llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
4818 llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
4819 const ObjCMethodDecl *MethodWithObjects);
4822 ReturnValueSlot Return = ReturnValueSlot());
4823
4824 /// Retrieves the default cleanup kind for an ARC cleanup.
4825 /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
4827 return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
4829 }
4830
4831 // ARC primitives.
4832 void EmitARCInitWeak(Address addr, llvm::Value *value);
4834 llvm::Value *EmitARCLoadWeak(Address addr);
4836 llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
4837 void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4838 void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4841 llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
4842 llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
4843 llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
4844 bool resultIgnored);
4845 llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
4846 bool resultIgnored);
4847 llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
4848 llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
4849 llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
4851 void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4852 llvm::Value *EmitARCAutorelease(llvm::Value *value);
4853 llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
4854 llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
4855 llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
4856 llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
4857
4858 llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
4859 llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
4860 llvm::Type *returnType);
4861 void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4862
4863 std::pair<LValue,llvm::Value*>
4865 std::pair<LValue,llvm::Value*>
4866 EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
4867 std::pair<LValue,llvm::Value*>
4869
4870 llvm::Value *EmitObjCAlloc(llvm::Value *value,
4871 llvm::Type *returnType);
4872 llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
4873 llvm::Type *returnType);
4874 llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);
4875
4876 llvm::Value *EmitObjCThrowOperand(const Expr *expr);
4877 llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
4878 llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
4879
4880 llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
4881 llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
4882 bool allowUnsafeClaim);
4883 llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
4886
4888
4890
4896
4897 void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
4900 void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
4901 void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
4902
4903 /// Emits a reference binding to the passed in expression.
4905
4906 //===--------------------------------------------------------------------===//
4907 // Expression Emission
4908 //===--------------------------------------------------------------------===//
4909
4910 // Expressions are broken into three classes: scalar, complex, aggregate.
4911
4912 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
4913 /// scalar type, returning the result.
4914 llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
4915
4916 /// Emit a conversion from the specified type to the specified destination
4917 /// type, both of which are LLVM scalar types.
4918 llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
4919 QualType DstTy, SourceLocation Loc);
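/// A minimal sketch (IntVal and Loc are placeholders) converting an 'int'
/// value to 'double':
/// \code
///   llvm::Value *D = EmitScalarConversion(IntVal, getContext().IntTy,
///                                         getContext().DoubleTy, Loc);
/// \endcode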
4920
4921 /// Emit a conversion from the specified complex type to the specified
4922 /// destination type, where the destination type is an LLVM scalar type.
4924 QualType DstTy,
4926
4927 /// EmitAggExpr - Emit the computation of the specified expression
4928 /// of aggregate type. The result is computed into the given slot,
4929 /// which may be null to indicate that the value is not needed.
4930 void EmitAggExpr(const Expr *E, AggValueSlot AS);
4931
4932 /// EmitAggExprToLValue - Emit the computation of the specified expression of
4933 /// aggregate type into a temporary LValue.
4935
4937
4938 /// EmitAggFinalDestCopy - Emit copy of the specified aggregate into
4939 /// destination address.
4941 ExprValueKind SrcKind);
4942
4943 /// Create a store to \arg DstPtr from \arg Src, truncating the stored value
4944 /// to at most \arg DstSize bytes.
4945 void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize,
4946 bool DstIsVolatile);
4947
4948 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
4949 /// make sure it survives garbage collection until this point.
4950 void EmitExtendGCLifetime(llvm::Value *object);
4951
4952 /// EmitComplexExpr - Emit the computation of the specified expression of
4953 /// complex type, returning the result.
4955 bool IgnoreReal = false,
4956 bool IgnoreImag = false);
4957
4958 /// EmitComplexExprIntoLValue - Emit the given expression of complex
4959 /// type and place its result into the specified l-value.
4960 void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
4961
4962 /// EmitStoreOfComplex - Store a complex number into the specified l-value.
4963 void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
4964
4965 /// EmitLoadOfComplex - Load a complex number from the specified l-value.
4967
4969 llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
4972
4975
4976 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
4977 /// global variable that has already been created for it. If the initializer
4978 /// has a different type than GV does, this may free GV and return a different
4979 /// one. Otherwise it just returns GV.
4980 llvm::GlobalVariable *
4982 llvm::GlobalVariable *GV);
4983
4984 // Emit an @llvm.invariant.start call for the given memory region.
4985 void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
4986
4987 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
4988 /// variable with global storage.
4989 void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
4990 bool PerformInit);
4991
4992 llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
4993 llvm::Constant *Addr);
4994
4995 llvm::Function *createTLSAtExitStub(const VarDecl &VD,
4996 llvm::FunctionCallee Dtor,
4997 llvm::Constant *Addr,
4998 llvm::FunctionCallee &AtExit);
4999
5000 /// Call atexit() with a function that passes the given argument to
5001 /// the given function.
5002 void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
5003 llvm::Constant *addr);
5004
5005 /// Registers the dtor using 'llvm.global_dtors' for platforms that do not
5006 /// support an 'atexit()' function.
5007 void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn,
5008 llvm::Constant *addr);
5009
5010 /// Call atexit() with function dtorStub.
5011 void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
5012
5013 /// Call unatexit() with function dtorStub.
5014 llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub);
5015
5016 /// Emit code in this function to perform a guarded variable
5017 /// initialization. Guarded initializations are used when it's not
5018 /// possible to prove that an initialization will be done exactly
5019 /// once, e.g. with a static local variable or a static data member
5020 /// of a class template.
5021 void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
5022 bool PerformInit);
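/// Conceptually (Itanium C++ ABI, sketch only), the guarded form corresponds
/// to:
/// \code
///   if (__cxa_guard_acquire(&guard)) {
///     /* run the initializer for D */
///     __cxa_guard_release(&guard);
///   }
/// \endcode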
5023
5025
5026 /// Emit a branch to select whether or not to perform guarded initialization.
5027 void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
5028 llvm::BasicBlock *InitBlock,
5029 llvm::BasicBlock *NoInitBlock,
5030 GuardKind Kind, const VarDecl *D);
5031
5032 /// GenerateCXXGlobalInitFunc - Generates code for initializing global
5033 /// variables.
5034 void
5035 GenerateCXXGlobalInitFunc(llvm::Function *Fn,
5036 ArrayRef<llvm::Function *> CXXThreadLocals,
5038
5039 /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
5040 /// variables.
5042 llvm::Function *Fn,
5043 ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
5044 llvm::Constant *>>
5045 DtorsOrStermFinalizers);
5046
5047 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
5048 const VarDecl *D,
5049 llvm::GlobalVariable *Addr,
5050 bool PerformInit);
5051
5053
5054 void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
5055
5056 void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
5057
5059
5060 //===--------------------------------------------------------------------===//
5061 // Annotations Emission
5062 //===--------------------------------------------------------------------===//
5063
5064 /// Emit an annotation call (intrinsic).
5065 llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
5066 llvm::Value *AnnotatedVal,
5067 StringRef AnnotationStr,
5068 SourceLocation Location,
5069 const AnnotateAttr *Attr);
5070
5071 /// Emit local annotations for the local variable V, declared by D.
5072 void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
5073
5074 /// Emit field annotations for the given field & value. Returns the
5075 /// annotation result.
5077
5078 //===--------------------------------------------------------------------===//
5079 // Internal Helpers
5080 //===--------------------------------------------------------------------===//
5081
5082 /// ContainsLabel - Return true if the statement contains a label in it. If
5083 /// this statement is not executed normally, and it does not contain a label,
5084 /// then we can just remove the code.
5085 static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
5086
5087 /// containsBreak - Return true if the statement contains a break out of it.
5088 /// If the statement (recursively) contains a switch or loop with a break
5089 /// inside of it, this is fine.
5090 static bool containsBreak(const Stmt *S);
5091
5092 /// Determine if the given statement might introduce a declaration into the
5093 /// current scope, by being a (possibly-labelled) DeclStmt.
5094 static bool mightAddDeclToScope(const Stmt *S);
5095
5096 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
5097 /// to a constant, or if it does but contains a label, return false. If it
5098 /// constant folds return true and set the boolean result in Result.
5100 bool AllowLabels = false);
5101
5102 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
5103 /// to a constant, or if it does but contains a label, return false. If it
5104 /// constant folds return true and set the folded value.
5105 bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
5106 bool AllowLabels = false);
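/// A minimal sketch of the usual folding pattern (Cond is a placeholder
/// condition expression):
/// \code
///   bool CondConstant;
///   if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
///     /* emit only the branch selected by CondConstant */
///   }
/// \endcode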
5107
5108 /// Ignore parentheses and logical-NOT to track conditions consistently.
5109 static const Expr *stripCond(const Expr *C);
5110
5111 /// isInstrumentedCondition - Determine whether the given condition is an
5112 /// instrumentable condition (i.e. no "&&" or "||").
5113 static bool isInstrumentedCondition(const Expr *C);
5114
5115 /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
5116 /// increments a profile counter based on the semantics of the given logical
5117 /// operator opcode. This is used to instrument branch condition coverage
5118 /// for logical operators.
5120 llvm::BasicBlock *TrueBlock,
5121 llvm::BasicBlock *FalseBlock,
5122 uint64_t TrueCount = 0,
5124 const Expr *CntrIdx = nullptr);
5125
5126 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
5127 /// if statement) to the specified blocks. Based on the condition, this might
5128 /// try to simplify the codegen of the conditional based on the branch.
5129 /// TrueCount should be the number of times we expect the condition to
5130 /// evaluate to true based on PGO data.
5131 void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
5132 llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
5134 const Expr *ConditionalOp = nullptr);
5135
5136 /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
5137 /// nonnull, if \p LHS is marked _Nonnull.
5138 void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
5139
5140 /// An enumeration which makes it easier to specify whether or not an
5141 /// operation is a subtraction.
5142 enum { NotSubtraction = false, IsSubtraction = true };
5143
5144 /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
5145 /// detect undefined behavior when the pointer overflow sanitizer is enabled.
5146 /// \p SignedIndices indicates whether any of the GEP indices are signed.
5147 /// \p IsSubtraction indicates whether the expression used to form the GEP
5148 /// is a subtraction.
5149 llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
5151 bool SignedIndices,
5152 bool IsSubtraction,
5154 const Twine &Name = "");
5155
5157 llvm::Type *elementType, bool SignedIndices,
5159 CharUnits Align, const Twine &Name = "");
5160
5161 /// Specifies which type of sanitizer check to apply when handling a
5162 /// particular builtin.
5167 };
5168
5169 /// Emits an argument for a call to a builtin. If the builtin sanitizer is
5170 /// enabled, a runtime check specified by \p Kind is also emitted.
5172
5173 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
5174 /// sanitizer is enabled, a runtime check is also emitted.
5175 llvm::Value *EmitCheckedArgForAssume(const Expr *E);
5176
5177 /// Emit a description of a type in a format suitable for passing to
5178 /// a runtime sanitizer handler.
5180
5181 /// Convert a value into a format suitable for passing to a runtime
5182 /// sanitizer handler.
5183 llvm::Value *EmitCheckValue(llvm::Value *V);
5184
5185 /// Emit a description of a source location in a format suitable for
5186 /// passing to a runtime sanitizer handler.
5188
5191
5192 /// Create a basic block that will either trap or call a handler function in
5193 /// the UBSan runtime with the provided arguments, and create a conditional
5194 /// branch to it.
5195 void
5196 EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
5197 Checked,
5199 ArrayRef<llvm::Value *> DynamicArgs);
5200
5201 /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
5202 /// if Cond is false.
5204 llvm::Value *Cond, llvm::ConstantInt *TypeId,
5205 llvm::Value *Ptr,
5206 ArrayRef<llvm::Constant *> StaticArgs);
5207
5208 /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
5209 /// checking is enabled. Otherwise, just emit an unreachable instruction.
5210 void EmitUnreachable(SourceLocation Loc);
5211
5212 /// Create a basic block that will call the trap intrinsic, and emit a
5213 /// conditional branch to it, for the -ftrapv checks.
5214 void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID,
5215 bool NoMerge = false);
5216
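Illustrative trigger for the trap path (not part of the header):

    // trapv.cpp - illustrative only; clang++ -ftrapv trapv.cpp
    #include <climits>
    int Increment(int X) { return X + 1; }   // INT_MAX + 1 branches to a trap block
    int main() { return Increment(INT_MAX); }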
5217 /// Emit a call to trap or debugtrap and attach function attribute
5218 /// "trap-func-name" if specified.
5219 llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
5220
5221 /// Emit a stub for the cross-DSO CFI check function.
5222 void EmitCfiCheckStub();
5223
5224 /// Emit a cross-DSO CFI failure handling function.
5225 void EmitCfiCheckFail();
5226
5227 /// Create a check for a function parameter that may potentially be
5228 /// declared as non-null.
5229 void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
5230 AbstractCallee AC, unsigned ParmNum);
5231
5232 void EmitNonNullArgCheck(Address Addr, QualType ArgType,
5233 SourceLocation ArgLoc, AbstractCallee AC,
5234 unsigned ParmNum);
5235
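A minimal example of what is being checked (illustrative only, assuming -fsanitize=nonnull-attribute):

    // nonnull_arg.cpp - illustrative only
    __attribute__((nonnull(1))) int Deref(int *P) { return *P; }
    int CallIt(int *Q) {
      // The call site checks that Q is non-null because parameter 1 of Deref
      // is declared nonnull.
      return Deref(Q);
    }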
5236 /// EmitWritebacks - Emit writebacks for the call arguments in \p Args.
5237 void EmitWritebacks(const CallArgList &Args);
5238
5239 /// EmitCallArg - Emit a single call argument.
5240 void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
5241
5242 /// EmitDelegateCallArg - We are performing a delegate call; that
5243 /// is, the current function is delegating to another one. Produce
5244 /// an r-value suitable for passing the given parameter.
5245 void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
5246 SourceLocation loc);
5247
5248 /// SetFPAccuracy - Set the minimum required accuracy of the given floating
5249 /// point operation, expressed as the maximum relative error in ulp.
5250 void SetFPAccuracy(llvm::Value *Val, float Accuracy);
5251
5252 /// Set the minimum required accuracy of the given sqrt operation
5253 /// based on CodeGenOpts.
5254 void SetSqrtFPAccuracy(llvm::Value *Val);
5255
5256 /// Set the minimum required accuracy of the given division operation based
5257 /// on CodeGenOpts.
5258 void SetDivFPAccuracy(llvm::Value *Val);
5259
5260 /// Set the codegen fast-math flags.
5261 void SetFastMathFlags(FPOptions FPFeatures);
5262
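Background sketch (an illustration under the assumption that the accuracy bound is recorded as !fpmath metadata, which is how LLVM IR expresses a maximum error in ulps; setAccuracy is a made-up helper name):

    // fpmath_sketch.cpp - illustrative sketch only
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/MDBuilder.h"
    void setAccuracy(llvm::Instruction *Inst, float Ulps) {
      llvm::MDBuilder MDHelper(Inst->getContext());
      // !fpmath carries the maximum acceptable relative error in ulp.
      Inst->setMetadata(llvm::LLVMContext::MD_fpmath, MDHelper.createFPMath(Ulps));
    }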
5263 // Truncate or extend a boolean vector to the requested number of elements.
5264 llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
5265 unsigned NumElementsDst,
5266 const llvm::Twine &Name = "");
5267
5268 private:
5269 // Emits a convergence_loop instruction for the given |BB|, with |ParentToken|
5270 // as its parent convergence instr.
5271 llvm::ConvergenceControlInst *emitConvergenceLoopToken(llvm::BasicBlock *BB);
5272
5273 // Adds a convergence_ctrl token with |ParentToken| as parent convergence
5274 // instr to the call |Input|.
5275 llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input);
5276
5277 // Find the convergence_entry instruction of |F|, or emit one if none exists.
5278 // Returns the convergence instruction.
5279 llvm::ConvergenceControlInst *
5280 getOrEmitConvergenceEntryToken(llvm::Function *F);
5281
5282 private:
5283 llvm::MDNode *getRangeForLoadFromType(QualType Ty);
5284 void EmitReturnOfRValue(RValue RV, QualType Ty);
5285
5286 void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
5287
5288 llvm::SmallVector<std::pair<llvm::WeakTrackingVH, llvm::Value *>, 4>
5289 DeferredReplacements;
5290
5291 /// Set the address of a local variable.
5292 void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
5293 assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
5294 LocalDeclMap.insert({VD, Addr});
5295 }
5296
5297 /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
5298 /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
5299 ///
5300 /// \param AI - The first function argument of the expansion.
5301 void ExpandTypeFromArgs(QualType Ty, LValue Dst,
5302 llvm::Function::arg_iterator &AI);
5303
5304 /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
5305 /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
5306 /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
5307 void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
5308 SmallVectorImpl<llvm::Value *> &IRCallArgs,
5309 unsigned &IRCallArgPos);
5310
5311 std::pair<llvm::Value *, llvm::Type *>
5312 EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
5313 std::string &ConstraintStr);
5314
5315 std::pair<llvm::Value *, llvm::Type *>
5316 EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
5317 QualType InputType, std::string &ConstraintStr,
5318 SourceLocation Loc);
5319
5320 /// Attempts to statically evaluate the object size of E. If that
5321 /// fails, emits code to figure the size of E out for us. This is
5322 /// pass_object_size aware.
5323 ///
5324 /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
5325 llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
5326 llvm::IntegerType *ResType,
5327 llvm::Value *EmittedE,
5328 bool IsDynamic);
5329
5330 /// Emits the size of E, as required by __builtin_object_size. This
5331 /// function is aware of pass_object_size parameters, and will act accordingly
5332 /// if E is a parameter with the pass_object_size attribute.
5333 llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
5334 llvm::IntegerType *ResType,
5335 llvm::Value *EmittedE,
5336 bool IsDynamic);
5337
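Illustrative user-level counterpart (not part of the header) of the constructs these helpers understand:

    // objsize.cpp - illustrative only
    #include <cstddef>
    // pass_object_size forwards the caller's known object size with the pointer,
    // so __builtin_object_size can be answered inside the callee.
    std::size_t Remaining(char *Buf __attribute__((pass_object_size(0)))) {
      return __builtin_object_size(Buf, 0);
    }
    char Storage[32];
    std::size_t Query() { return Remaining(Storage); }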
5338 llvm::Value *emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
5339 llvm::IntegerType *ResType);
5340
5341 void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
5342 Address Loc);
5343
5344 public:
5345 enum class EvaluationOrder {
5346 ///! No language constraints on evaluation order.
5347 Default,
5348 ///! Language semantics require left-to-right evaluation.
5349 ForceLeftToRight,
5350 ///! Language semantics require right-to-left evaluation.
5351 ForceRightToLeft
5352 };
5353
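A short reminder of the language rule behind ForceLeftToRight (illustrative only): braced-init-list elements are evaluated left to right, whereas ordinary call arguments may be evaluated in any order.

    // eval_order.cpp - illustrative only
    #include <vector>
    int Next(int &Counter) { return ++Counter; }
    std::vector<int> Make() {
      int C = 0;
      return std::vector<int>{Next(C), Next(C), Next(C)};  // elements are 1, 2, 3
    }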
5354 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
5355 // an ObjCMethodDecl.
5356 struct PrototypeWrapper {
5357 llvm::PointerUnion<const FunctionProtoType *, const ObjCMethodDecl *> P;
5358
5359 PrototypeWrapper(const FunctionProtoType *FT) : P(FT) {}
5360 PrototypeWrapper(const ObjCMethodDecl *MD) : P(MD) {}
5361 };
5362
5363 void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype,
5364 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
5365 AbstractCallee AC = AbstractCallee(),
5366 unsigned ParamsToSkip = 0,
5367 EvaluationOrder Order = EvaluationOrder::Default);
5368
5369 /// EmitPointerWithAlignment - Given an expression with a pointer type,
5370 /// emit the value and compute our best estimate of the alignment of the
5371 /// pointee.
5372 ///
5373 /// \param BaseInfo - If non-null, this will be initialized with
5374 /// information about the source of the alignment and the may-alias
5375 /// attribute. Note that this function will conservatively fall back on
5376 /// the type when it doesn't recognize the expression and may-alias will
5377 /// be set to false.
5378 ///
5379 /// One reasonable way to use this information is when there's a language
5380 /// guarantee that the pointer must be aligned to some stricter value, and
5381 /// we're simply trying to ensure that sufficiently obvious uses of under-
5382 /// aligned objects don't get miscompiled; for example, a placement new
5383 /// into the address of a local variable. In such a case, it's quite
5384 /// reasonable to just ignore the returned alignment when it isn't from an
5385 /// explicit source.
5386 Address
5387 EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
5388 TBAAAccessInfo *TBAAInfo = nullptr,
5389 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
5390
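The placement-new scenario from the comment above, as a standalone snippet (illustrative only):

    // placement_new.cpp - illustrative only
    #include <new>
    struct Pair { int A, B; };
    void Construct(void *Spot) {
      // The alignment clang can prove for Spot comes from EmitPointerWithAlignment;
      // a stricter language-level guarantee may justify ignoring that estimate.
      new (Spot) Pair{1, 2};
    }
    int main() {
      alignas(Pair) unsigned char Local[sizeof(Pair)];
      Construct(Local);
    }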
5391 /// If \p E references a parameter with pass_object_size info or a constant
5392 /// array size modifier, emit the object size divided by the size of \p EltTy.
5393 /// Otherwise return null.
5394 llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);
5395
5396 void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
5397
5398 struct FMVResolverOption {
5399 llvm::Function *Function;
5400 llvm::SmallVector<StringRef, 8> Features;
5401 std::optional<StringRef> Architecture;
5402
5403 FMVResolverOption(llvm::Function *F, ArrayRef<StringRef> Feats,
5404 std::optional<StringRef> Arch = std::nullopt)
5405 : Function(F), Features(Feats), Architecture(Arch) {}
5406 };
5407
5408 // Emits the body of a multiversion function's resolver. Assumes that the
5409 // options are already sorted in the proper order, with the 'default' option
5410 // last (if it exists).
5411 void EmitMultiVersionResolver(llvm::Function *Resolver,
5412 ArrayRef<FMVResolverOption> Options);
5413 void EmitX86MultiVersionResolver(llvm::Function *Resolver,
5414 ArrayRef<FMVResolverOption> Options);
5415 void EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
5416 ArrayRef<FMVResolverOption> Options);
5417 void EmitRISCVMultiVersionResolver(llvm::Function *Resolver,
5418 ArrayRef<FMVResolverOption> Options);
5419
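User-level source that produces such a resolver (illustrative only): the target_clones attribute emits one body per listed feature set plus a resolver, typically reached through an ifunc, that selects among them.

    // fmv.cpp - illustrative only; x86-64 feature names shown
    __attribute__((target_clones("avx2", "default")))
    int DotStep(int A, int B) { return A * B; }
    int Use(int X) {
      // Calls are routed through the resolver emitted by
      // EmitMultiVersionResolver / EmitX86MultiVersionResolver.
      return DotStep(X, 3);
    }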
5420 private:
5421 QualType getVarArgType(const Expr *Arg);
5422
5423 void EmitDeclMetadata();
5424
5425 BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
5426 const AutoVarEmission &emission);
5427
5428 void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
5429
5430 llvm::Value *GetValueForARMHint(unsigned BuiltinID);
5431 llvm::Value *EmitX86CpuIs(const CallExpr *E);
5432 llvm::Value *EmitX86CpuIs(StringRef CPUStr);
5433 llvm::Value *EmitX86CpuSupports(const CallExpr *E);
5434 llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
5435 llvm::Value *EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask);
5436 llvm::Value *EmitX86CpuInit();
5437 llvm::Value *FormX86ResolverCondition(const FMVResolverOption &RO);
5438 llvm::Value *EmitAArch64CpuInit();
5439 llvm::Value *FormAArch64ResolverCondition(const FMVResolverOption &RO);
5440 llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
5441 llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
5442 };
5443
5444 inline DominatingLLVMValue::saved_type
5445 DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
5446 if (!needsSaving(value)) return saved_type(value, false);
5447
5448 // Otherwise, we need an alloca.
5449 auto align = CharUnits::fromQuantity(
5450 CGF.CGM.getDataLayout().getPrefTypeAlign(value->getType()));
5451 Address alloca =
5452 CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
5453 CGF.Builder.CreateStore(value, alloca);
5454
5455 return saved_type(alloca.emitRawPointer(CGF), true);
5456}
5457
5458 inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
5459 saved_type value) {
5460 // If the value says it wasn't saved, trust that it's still dominating.
5461 if (!value.getInt()) return value.getPointer();
5462
5463 // Otherwise, it should be an alloca instruction, as set up in save().
5464 auto alloca = cast<llvm::AllocaInst>(value.getPointer());
5465 return CGF.Builder.CreateAlignedLoad(alloca->getAllocatedType(), alloca,
5466 alloca->getAlign());
5467}
5468
5469 } // end namespace CodeGen
5470
5471// Map the LangOption for floating point exception behavior into
5472// the corresponding enum in the IR.
5473llvm::fp::ExceptionBehavior
5474 ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind);
5475 } // end namespace clang
5476
5477 #endif
Enums/classes describing ABI related information about constructors, destructors and thunks.
#define V(N, I)
Definition: ASTContext.h:3453
static bool CanThrow(Expr *E, ASTContext &Ctx)
Definition: CFG.cpp:2686
@ ForDeactivation
Definition: CGCleanup.cpp:1205
const Decl * D
enum clang::sema::@1724::IndirectLocalPathEntry::EntryKind Kind
Expr * E
unsigned OldSize
Defines the clang::Expr interface and subclasses for C++ expressions.
const CFGBlock * Block
Definition: HTMLLogger.cpp:152
#define X(type, name)
Definition: Value.h:144
llvm::MachO::Architecture Architecture
Definition: MachO.h:27
llvm::MachO::Target Target
Definition: MachO.h:51
Defines some OpenMP-specific enums and functions.
SourceRange Range
Definition: SemaObjC.cpp:758
VarDecl * Variable
Definition: SemaObjC.cpp:757
SourceLocation Loc
Definition: SemaObjC.cpp:759
const char * Data
This file defines OpenACC AST classes for statement-level contructs.
This file defines OpenMP AST classes for executable directives and clauses.
C Language Family Type Representation.
StateNode * Previous
#define bool
Definition: amdgpuintrin.h:20
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4224
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:6986
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2718
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3577
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:3127
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6678
Attr - This represents one attribute.
Definition: Attr.h:43
Represents an attribute applied to a statement.
Definition: Stmt.h:2107
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition: Expr.h:4324
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition: Expr.h:4362
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition: Expr.h:4359
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3909
static bool isLogicalOp(Opcode Opc)
Definition: Expr.h:4042
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6414
BreakStmt - This represents a break.
Definition: Stmt.h:3007
Represents a call to a CUDA kernel function.
Definition: ExprCXX.h:231
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1491
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2553
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1268
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1375
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2498
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2817
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition: ExprCXX.h:478
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition: StmtCXX.h:135
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition: ExprCXX.h:1737
Represents a call to a member function that may be written either with member call syntax (e....
Definition: ExprCXX.h:176
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2078
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2241
A call to an overloaded operator written using operator syntax.
Definition: ExprCXX.h:81
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition: ExprCXX.h:2617
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
Represents a C++ temporary.
Definition: ExprCXX.h:1457
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1206
CXXTryStmt - A C++ try block, including all handlers.
Definition: StmtCXX.h:69
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1066
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
Describes the capture of either a variable, or 'this', or variable-length array type.
Definition: Stmt.h:3797
This captures a statement into a function.
Definition: Stmt.h:3784
CaseStmt - Represent a case statement.
Definition: Stmt.h:1828
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3547
const CXXBaseSpecifier *const * path_const_iterator
Definition: Expr.h:3614
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
Represents a 'co_await' expression.
Definition: ExprCXX.h:5191
bool hasProfileClangInstr() const
Check if Clang profile instrumenation is on.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:193
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
bool hasOffset() const
Definition: Address.h:242
void setAlignment(CharUnits Value)
Definition: Address.h:191
llvm::Value * getOffset() const
Definition: Address.h:244
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:181
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:572
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:858
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:898
A pair of helper functions for a __block variable.
Information about the layout of a __block variable.
Definition: CGBlocks.h:136
CGBlockInfo - Information to generate a block literal.
Definition: CGBlocks.h:156
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
Definition: CGBuilder.h:158
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:128
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
All available information about a concrete callee.
Definition: CGCall.h:63
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
An abstract representation of regular/ObjC call/message targets.
const ParmVarDecl * getParamDecl(unsigned I) const
ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
RawAddress getOriginalAllocatedAddress() const
Returns the address for the original alloca instruction.
Address getObjectAddress(CodeGenFunction &CGF) const
Returns the address of the object within this declaration.
API for captured statement code generation.
static bool classof(const CGCapturedStmtInfo *)
llvm::SmallDenseMap< const VarDecl *, FieldDecl * > getCaptureFields()
Get the CaptureFields.
CGCapturedStmtInfo(CapturedRegionKind K=CR_Default)
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
CGCapturedStmtInfo(const CapturedStmt &S, CapturedRegionKind K=CR_Default)
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
RAII for correct setting/restoring of CapturedStmtInfo.
CGCapturedStmtRAII(CodeGenFunction &CGF, CGCapturedStmtInfo *NewCapturedStmtInfo)
CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E)
void Emit(CodeGenFunction &CGF, Flags flags) override
Emit the cleanup.
CallLifetimeEnd(RawAddress addr, llvm::Value *size)
An object to manage conditionally-evaluated expressions.
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const
A scope within which we are constructing the fields of an object which might use a CXXDefaultInitExpr...
FieldConstructionScope(CodeGenFunction &CGF, Address This)
A class controlling the emission of a finally block.
void enter(CodeGenFunction &CGF, const Stmt *Finally, llvm::FunctionCallee beginCatchFn, llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn)
Enters a finally block for an implementation using zero-cost exceptions.
InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:726
LexicalScope(CodeGenFunction &CGF, SourceRange Range)
Enter a new cleanup scope.
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
~LexicalScope()
Exit this cleanup scope, emitting any accumulated cleanups.
RAII for preserving necessary info during inlined region body codegen.
InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &FiniBB)
void Emit(CodeGenFunction &CGF, Flags) override
Emit the cleanup.
RAII for preserving necessary info during Outlined region body codegen.
OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &RetBB)
Controls insertion of cancellation exit blocks in worksharing constructs.
OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel)
Save/restore original map of previously emitted local vars in case when we need to duplicate emission...
The class used to assign some variables some temporarily addresses.
bool apply(CodeGenFunction &CGF)
Applies new addresses to the list of the variables.
void restore(CodeGenFunction &CGF)
Restores original addresses of the variables.
bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD, Address TempAddr)
Sets the address of the variable LocalVD to be TempAddr in function CGF.
The scope used to remap some variables as private in the OpenMP loop body (or other captured region e...
void restoreMap()
Restore all mapped variables w/o clean up.
bool Privatize()
Privatizes local variables previously registered as private.
bool isGlobalVarCaptured(const VarDecl *VD) const
Checks if the global variable is captured in current function.
OMPPrivateScope(CodeGenFunction &CGF)
Enter a new OpenMP private scope.
~OMPPrivateScope()
Exit scope - all the mapped variables are restored.
bool addPrivate(const VarDecl *LocalVD, Address Addr)
Registers LocalVD variable as a private with Addr as the address of the corresponding private variabl...
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
OpaqueValueMapping(CodeGenFunction &CGF, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?...
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, LValue lvalue)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
ParentLoopDirectiveForScanRegion(CodeGenFunction &CGF, const OMPExecutableDirective &ParentLoopDirectiveForScan)
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
RunCleanupsScope(CodeGenFunction &CGF)
Enter a new cleanup scope.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void ForceCleanup(std::initializer_list< llvm::Value ** > ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
An RAII object to record that we're evaluating a statement expression.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, ObjCMethodDecl *MD, bool ctor)
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, QualType::DestructionKind dtorKind)
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn, ArrayRef< llvm::Function * > CXXThreadLocals, ConstantAddress Guard=ConstantAddress::invalid())
GenerateCXXGlobalInitFunc - Generates code for initializing global variables.
llvm::Value * EmitPointerAuthAuth(const CGPointerAuthInfo &Info, llvm::Value *Pointer)
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
RValue EmitLoadOfGlobalRegLValue(LValue LV)
void EmitGotoStmt(const GotoStmt &S)
void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, Address This, Address Src, const CXXConstructExpr *E)
void EmitDestructorBody(FunctionArgList &Args)
void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion, const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen, OMPTaskDataTy &Data)
void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD)
llvm::BasicBlock * getEHDispatchBlock(EHScopeStack::stable_iterator scope)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete, llvm::Value *CompletePtr, QualType ElementType)
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount=0, Stmt::Likelihood LH=Stmt::LH_None, const Expr *CntrIdx=nullptr)
EmitBranchToCounterBlock - Emit a conditional branch to a new block that increments a profile counter...
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
void EmitARCDestroyWeak(Address addr)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags, bool LoadBlockVarAddr, bool CanThrow)
Enter a cleanup to destroy a __block variable.
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
void EmitOMPAggregateAssign(Address DestAddr, Address SrcAddr, QualType OriginalType, const llvm::function_ref< void(Address, Address)> CopyGen)
Perform element by element copying of arrays with type OriginalType from SrcAddr to DestAddr using co...
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Emit a check that V is the address of storage of the appropriate size and alignment for an object of ...
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
void EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
void EmitAsanPrologueOrEpilogue(bool Prologue)
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitCastLValue(const CastExpr *E)
void EnterSEHTryStmt(const SEHTryStmt &S)
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl)
llvm::Value * EmitSVEPredicateCast(llvm::Value *Pred, llvm::ScalableVectorType *VTy)
Address getExceptionSlot()
Returns a pointer to the function's exception object and selector slot, which is assigned in every la...
RawAddress CreateMemTemp(QualType T, CharUnits Align, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
bool isBinaryLogicalOp(const Expr *E) const
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
void VolatilizeTryBlocks(llvm::BasicBlock *BB, llvm::SmallPtrSet< llvm::BasicBlock *, 10 > &V)
void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp, const CGFunctionInfo **ImplFnInfo, llvm::Function **ImplFn)
llvm::Function * GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF, const SEHFinallyStmt &Finally)
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
llvm::Function * GenerateSEHFilterFunction(CodeGenFunction &ParentCGF, const SEHExceptStmt &Except)
static Destroyer destroyNonTrivialCStruct
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee, const ThunkInfo *Thunk, bool IsUnprototyped)
void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor, CXXCtorType CtorType, const FunctionArgList &Args, SourceLocation Loc)
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
static bool cxxDestructorCanThrow(QualType T)
Check if T is a C++ class that has a destructor that can throw.
void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor, const FunctionArgList &Args)
llvm::Function * GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD, const ThunkInfo &Thunk)
SanitizerSet SanOpts
Sanitizers enabled for this function.
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
LValue EmitCoawaitLValue(const CoawaitExpr *E)
llvm::BasicBlock * getInvokeDestImpl()
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
void EmitOMPCopy(QualType OriginalType, Address DestAddr, Address SrcAddr, const VarDecl *DestVD, const VarDecl *SrcVD, const Expr *Copy)
Emit proper copying of data from one variable to another.
void EmitIfStmt(const IfStmt &S)
void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator, CallArgList &CallArgs, const CGFunctionInfo *CallOpFnInfo=nullptr, llvm::Constant *CallOpFn=nullptr)
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs={})
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T, Address Addr)
PushDestructorCleanup - Push a cleanup to call the complete-object variant of the given destructor on...
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
void EmitARCMoveWeak(Address dst, Address src)
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
void EmitAArch64MultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
void EmitOMPReductionClauseInit(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope, bool ForInscan=false)
Emit initial code for reduction variables.
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitVTableAssumptionLoad(const VPtr &vptr, Address This)
Emit assumption that vptr load == global vtable.
void unprotectFromPeepholes(PeepholeProtection protection)
Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter, const Stmt *OutlinedStmt)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
SmallVector< Address, 1 > SEHCodeSlotStack
A stack of exception code slots.
JumpDest getJumpDestInCurrentScope(StringRef Name=StringRef())
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void generateObjCGetterBody(const ObjCImplementationDecl *classImpl, const ObjCPropertyImplDecl *propImpl, const ObjCMethodDecl *GetterMothodDecl, llvm::Constant *AtomicHelperFn)
void EmitAutoVarDecl(const VarDecl &D)
EmitAutoVarDecl - Emit an auto variable declaration.
void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E, SmallVectorImpl< llvm::Value * > &Ops, SVETypeFlags TypeFlags)
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
llvm::Constant * createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor, llvm::Constant *Addr)
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static void EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDirective &S)
Emit device code for the target teams directive.
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
void callCStructDefaultConstructor(LValue Dst)
void EmitOMPReverseDirective(const OMPReverseDirective &S)
static bool hasScalarEvaluationKind(QualType T)
llvm::Value * EmitObjCAutoreleasePoolPush()
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
llvm::Value * EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx)
void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable, CFITypeCheckKind TCK, SourceLocation Loc)
EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
llvm::Value * EmitARCRetainAutoreleaseNonBlock(llvm::Value *value)
void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr)
const BlockByrefInfo & getBlockByrefInfo(const VarDecl *var)
AwaitSuspendWrapperInfo CurAwaitSuspendWrapper
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
llvm::function_ref< std::pair< llvm::Value *, llvm::Value * >(CodeGenFunction &, const OMPExecutableDirective &S, Address LB, Address UB)> CodeGenDispatchBoundsTy
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T)
Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known to be unsigned.
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Address authPointerToPointerCast(Address Ptr, QualType SourceType, QualType DestType)
void EmitCXXTryStmt(const CXXTryStmt &S)
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
std::pair< LValue, llvm::Value * > EmitARCStoreStrong(const BinaryOperator *e, bool ignored)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
RValue EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method, const CGCallee &Callee, ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E, CallArgList *RtlArgs, llvm::CallBase **CallOrInvoke)
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD, const ThunkInfo &Thunk, bool IsUnprototyped)
Generate a thunk for the given method.
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
llvm::Value * EmitARCRetainAutoreleasedReturnValue(llvm::Value *value)
void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
static void EmitOMPTargetTeamsDistributeDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeDirective &S)
Emit device code for the target teams distribute directive.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp)
static void EmitOMPTargetParallelForSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForSimdDirective &S)
Emit device code for the target parallel for simd directive.
llvm::Value * EmitObjCAllocWithZone(llvm::Value *value, llvm::Type *returnType)
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, llvm::Value *NumElements, Address ArrayPtr, const CXXConstructExpr *E, bool NewPointerIsChecked, bool ZeroInitialization=false)
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::Value * EmitSVEGatherLoad(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, const ArrayType *ArrayTy, Address ArrayPtr, const CXXConstructExpr *E, bool NewPointerIsChecked, bool ZeroInitialization=false)
void popCatchScope()
popCatchScope - Pops the catch scope at the top of the EHScope stack, emitting any required code (oth...
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
llvm::Value * EmitRISCVCpuSupports(const CallExpr *E)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual)
Determine whether a base class initialization may overlap some other object.
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
const OMPExecutableDirective * OMPParentLoopDirectiveForScan
Parent loop-based directive for scan directive.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
void EmitOMPTaskDirective(const OMPTaskDirective &S)
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitAnyExprToExn(const Expr *E, Address Addr)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, const Expr *Base, llvm::CallBase **CallOrInvoke)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
llvm::Value * EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void GenerateObjCMethod(const ObjCMethodDecl *OMD)
void EmitOMPUseDevicePtrClause(const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope, const llvm::DenseMap< const ValueDecl *, llvm::Value * > CaptureDeviceAddrMap)
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
llvm::Value * EmitARCAutorelease(llvm::Value *value)
llvm::Value * emitPointerAuthResignCall(llvm::Value *Pointer, const CGPointerAuthInfo &CurInfo, const CGPointerAuthInfo &NewInfo)
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
void EmitExtendGCLifetime(llvm::Value *object)
EmitExtendGCLifetime - Given a pointer to an Objective-C object, make sure it survives garbage collec...
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
void EmitOMPDistributeLoop(const OMPLoopDirective &S, const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr)
Emit code for the distribute loop-based directive.
void EmitARCNoopIntrinsicUse(ArrayRef< llvm::Value * > values)
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
llvm::Value * EmitSVEMaskedStore(const CallExpr *, SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID)
llvm::Constant * GenerateObjCAtomicGetterCopyHelperFunction(const ObjCPropertyImplDecl *PID)
void callCStructCopyAssignmentOperator(LValue Dst, LValue Src)
void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S)
void callCStructMoveConstructor(LValue Dst, LValue Src)
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty)
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF, llvm::Value *ParentFP, llvm::Value *EntryEBP)
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
llvm::BasicBlock * getEHResumeBlock(bool isCleanup)
static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetDirective &S)
Emit device code for the target directive.
void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr, QualType DeleteTy, llvm::Value *NumElements=nullptr, CharUnits CookieSize=CharUnits())
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
void callCStructCopyConstructor(LValue Dst, LValue Src)
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref< RValue(RValue)> &UpdateOp, bool IsVolatile)
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
llvm::Value * EmitSEHExceptionInfo()
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
llvm::Value * EmitARCLoadWeakRetained(Address addr)
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
const LangOptions & getLangOpts() const
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S)
Emit device code for the target simd directive.
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope, llvm::AtomicOrdering &AO, llvm::SyncScope::ID &SSID)
void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S, OMPPrivateScope &LoopScope)
Emit initial code for loop counters of loop-based directives.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
LValue EmitInitListLValue(const InitListExpr *E)
llvm::Value * EmitARCRetainAutorelease(QualType type, llvm::Value *value)
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end, QualType elementType, CharUnits elementAlign, Destroyer *destroyer, bool checkZeroLength, bool useEHCleanup)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D, bool NoFinals, llvm::Value *IsLastIterCond=nullptr)
Emit final copying of lastprivate values to original variables at the end of the worksharing or simd ...
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::Function * generateAwaitSuspendWrapper(Twine const &CoroName, Twine const &SuspendPointName, CoroutineSuspendExpr const &S)
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
SmallVector< llvm::Value *, 8 > ObjCEHValueStack
ObjCEHValueStack - Stack of Objective-C exception values, used for rethrows.
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
void EmitFunctionBody(const Stmt *Body)
VlaSizePair getVLAElements1D(QualType vla)
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
llvm::Value * EmitSVETupleCreate(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType, ArrayRef< llvm::Value * > Ops)
const CodeGen::CGBlockInfo * BlockInfo
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
void EmitAggregateCopyCtor(LValue Dest, LValue Src, AggValueSlot::Overlap_t MayOverlap)
llvm::Value * EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable, llvm::Type *VTableTy, uint64_t VTableByteOffset)
Emit a type checked load from the given vtable.
void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
void EmitRISCVMultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void markStmtAsUsed(bool Skipped, const Stmt *S)
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
llvm::AllocaInst * EHSelectorSlot
The selector slot.
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
void EmitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &S)
void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
EmitExprAsInit - Emits the code necessary to initialize a location in memory with the given initializ...
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, const Twine &name="")
void emitByrefStructureInit(const AutoVarEmission &emission)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
void callCStructDestructor(LValue Dst)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
llvm::Value * EmitObjCRetainNonBlock(llvm::Value *value, llvm::Type *returnType)
llvm::Value * GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, bool Delegating)
GetVTTParameter - Return the VTT parameter that should be passed to a base constructor/destructor wit...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E)
Emit a call to a constructor inherited from a base class, passing the current constructor's arguments...
llvm::Value * EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType)
Address EmitExtVectorElementLValue(LValue V)
void EmitOMPSimdFinal(const OMPLoopDirective &D, const llvm::function_ref< llvm::Value *(CodeGenFunction &)> CondGen)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_ConstructorCall
Checking the 'this' pointer for a constructor call.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_NonnullAssign
Checking the value assigned to a _Nonnull pointer. Must not be null.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
@ TCK_Upcast
Checking the operand of a cast to a base object.
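A minimal illustration of how the TypeCheckKind values above pair with EmitTypeCheck, assumed to run inside a CodeGenFunction member; E, Addr and Ty are hypothetical (an expression, its address, and its type).
  // Guard a scalar load with the -fsanitize checks appropriate for TCK_Load
  // (non-null, suitably sized, suitably aligned), then do the load itself.
  EmitTypeCheck(TCK_Load, E->getExprLoc(), Addr, Ty);
  llvm::Value *V = EmitLoadOfScalar(Addr, /*Volatile=*/false, Ty,
                                    E->getExprLoc());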
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
llvm::Value * EmitSMELdrStr(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitContinueStmt(const ContinueStmt &S)
void setCurrentProfileCount(uint64_t Count)
Set the profiler's current count.
llvm::BasicBlock * getTerminateFunclet()
getTerminateFunclet - Return a cleanup funclet that just calls terminate.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
llvm::Value * EmitARCStoreStrongCall(Address addr, llvm::Value *value, bool resultIgnored)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isInit=false, bool isNontemporal=false)
bool InNoMergeAttributedStmt
True if the current statement has the nomerge attribute.
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
llvm::Value * EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx, const llvm::ElementCount &Count)
VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass)
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
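A hedged sketch of how a region is bracketed by hand, inside a CodeGenFunction member; real code normally uses the RunCleanupsScope RAII helper, which does exactly this.
  EHScopeStack::stable_iterator Depth = EHStack.stable_begin();
  // ... emit the region; pushDestroy() and friends may add cleanups here ...
  PopCleanupBlocks(Depth);   // emit and pop everything pushed since Depth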
llvm::Type * ConvertTypeForMem(QualType T)
llvm::Function * createTLSAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor, llvm::Constant *Addr, llvm::FunctionCallee &AtExit)
Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *elementType, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align, const Twine &Name="")
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
llvm::Value * EmitARCUnsafeUnretainedScalarExpr(const Expr *expr)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitAutoVarInit(const AutoVarEmission &emission)
llvm::BasicBlock * getUnreachableBlock()
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPt - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy)
Emit an aggregate assignment.
void GenerateOpenMPCapturedVars(const CapturedStmt &S, SmallVectorImpl< llvm::Value * > &CapturedVars)
void EmitNonNullArgCheck(Address Addr, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
bool isPointerKnownNonNull(const Expr *E)
RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align, const Twine &Name="tmp")
llvm::Value * EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy, SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID, bool IsZExtReturn)
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
llvm::SmallVector< const JumpDest *, 2 > SEHTryEpilogueStack
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
DominatingValue< T >::saved_type saveValueInCond(T value)
const llvm::function_ref< void(CodeGenFunction &, llvm::Function *, const OMPTaskDataTy &)> TaskGenTy
static void EmitOMPTargetTeamsGenericLoopDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsGenericLoopDirective &S)
Emit device code for the target teams loop directive.
llvm::Value * ExceptionSlot
The exception slot.
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
llvm::Value * EmitARCRetainBlock(llvm::Value *value, bool mandatory)
QualType TypeOfSelfObject()
TypeOfSelfObject - Return type of object that this self represents.
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
llvm::Value * EmitSVEDupX(llvm::Value *Scalar)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
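A hedged sketch of the temp-evaluation pattern these helpers support, inside a CodeGenFunction member; E and Ty are hypothetical (an arbitrary expression and its type).
  // Evaluate E into a fresh, properly aligned temporary and read the result
  // back out as an RValue.
  RawAddress Temp = CreateMemTemp(Ty, "expr.tmp");
  EmitAnyExprToMem(E, Temp, Ty.getQualifiers(), /*IsInitializer=*/true);
  RValue RV = convertTempToRValue(Temp, Ty, E->getExprLoc());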
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S)
void markStmtMaybeUsed(const Stmt *S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ Default
No language constraints on evaluation order.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
void EmitAttributedStmt(const AttributedStmt &S)
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
llvm::BasicBlock * OMPBeforeScanBlock
void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn, llvm::Constant *addr)
Registers the dtor using 'llvm.global_dtors' for platforms that do not support an 'atexit()' function...
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
llvm::SmallPtrSet< const CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType, llvm::Type *ElementTy, Address NewPtr, llvm::Value *NumElements, llvm::Value *AllocSizeWithoutCookie)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen without...
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
llvm::Value * EmitPointerAuthSign(const CGPointerAuthInfo &Info, llvm::Value *Pointer)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A)
void EmitAtomicInit(Expr *E, LValue lvalue)
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
static std::string getNonTrivialDestructorStr(QualType QT, CharUnits Alignment, bool IsVolatile, ASTContext &Ctx)
llvm::DenseMap< const Decl *, Address > DeclMapTy
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we're intending to store to the side, but which will prob...
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
llvm::Value * EmitARCLoadWeak(Address addr)
const TargetInfo & getTarget() const
void initFullExprCleanup()
Set up the last cleanup that was pushed as a conditional full-expression cleanup.
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForDirective &S)
void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy, SourceLocation Loc)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
void EmitOMPInnerLoop(const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, const Expr *IncExpr, const llvm::function_ref< void(CodeGenFunction &)> BodyGen, const llvm::function_ref< void(CodeGenFunction &)> PostIncGen)
Emit inner loop of the worksharing/simd construct.
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs={})
llvm::Value * EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress, llvm::Value *Discriminator)
Create the discriminator from the storage address and the entity hash.
llvm::Constant * GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo)
llvm::Value * vectorWrapScalar16(llvm::Value *Op)
llvm::Function * LookupNeonLLVMIntrinsic(unsigned IntrinsicID, unsigned Modifier, llvm::Type *ArgTy, const CallExpr *E)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
void EmitLabelStmt(const LabelStmt &S)
void emitDestroy(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
llvm::Value * EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType)
LValue EmitVAArgExprLValue(const VAArgExpr *E)
llvm::Value * EmitSEHExceptionCode()
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
llvm::Value * EmitObjCCollectionLiteral(const Expr *E, const ObjCMethodDecl *MethodWithObjects)
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void GenerateCXXGlobalCleanUpFunc(llvm::Function *Fn, ArrayRef< std::tuple< llvm::FunctionType *, llvm::WeakTrackingVH, llvm::Constant * > > DtorsOrStermFinalizers)
GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global variables.
void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit)
Emit code in this function to perform a guarded variable initialization.
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV, bool PerformInit)
EmitCXXGlobalVarDeclInit - Create the initializer for a C++ variable with global storage.
LValue EmitCoyieldLValue(const CoyieldExpr *E)
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
llvm::Value * EmitObjCThrowOperand(const Expr *expr)
void EmitX86MultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
llvm::Value * emitPointerAuthResign(llvm::Value *Pointer, QualType PointerType, const CGPointerAuthInfo &CurAuthInfo, const CGPointerAuthInfo &NewAuthInfo, bool IsKnownNonNull)
void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable, CFITypeCheckKind TCK, SourceLocation Loc)
EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for RD using llvm....
void EmitOMPSingleDirective(const OMPSingleDirective &S)
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType)
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
void initFullExprCleanupWithFlag(RawAddress ActiveFlag)
llvm::Value * EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::BasicBlock * getInvokeDest()
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
llvm::CanonicalLoopInfo * EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth)
Emit the Stmt S and return its topmost canonical loop, if any.
llvm::Value * EmitRISCVCpuSupports(ArrayRef< StringRef > FeaturesStrs)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
llvm::Value * LoadObjCSelf()
LoadObjCSelf - Load the value of self.
bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO, bool IsVolatile, bool isInit)
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
void EmitARCCopyWeak(Address dst, Address src)
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs={})
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
void pushSEHCleanup(CleanupKind kind, llvm::Function *FinallyFunc)
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void PushDestructorCleanup(QualType T, Address Addr)
PushDestructorCleanup - Push a cleanup to call the complete-object destructor of an object of the giv...
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, AggValueSlot ThisAVS, const CXXConstructExpr *E)
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD, CXXDtorType Type, const CXXRecordDecl *RD)
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
bool ShouldSkipSanitizerInstrumentation()
ShouldSkipSanitizerInstrumentation - Return true if the current function should not be instrumented w...
uint64_t getCurrentProfileCount()
Get the profiler's current count.
auto getIsCounterPair(const Stmt *S) const
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
llvm::Value * EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty)
llvm::Value * EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID)
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
llvm::Value * EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty, const llvm::CmpInst::Predicate Fp, const llvm::CmpInst::Predicate Ip, const llvm::Twine &Name="")
void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum, llvm::Value *ptr)
void defaultInitNonTrivialCStructVar(LValue Dst)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
llvm::ScalableVectorType * getSVEType(const SVETypeFlags &TypeFlags)
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke=nullptr, bool IsMustTail=false)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
void EmitSwitchStmt(const SwitchStmt &S)
LValue EmitLValueForLambdaField(const FieldDecl *Field, llvm::Value *ThisValue)
bool isTrivialInitializer(const Expr *Init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF, Address ParentVar, llvm::Value *ParentFP)
Recovers the address of a local in a parent function.
const FieldDecl * FindFlexibleArrayMemberFieldAndOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FAMDecl, uint64_t &Offset)
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn, llvm::Constant *addr)
Call atexit() with a function that passes the given argument to the given function.
llvm::Value * EmitRISCVCpuIs(const CallExpr *E)
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
emitBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
Address EmitVAListRef(const Expr *E)
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
Address emitBlockByrefAddress(Address baseAddr, const BlockByrefInfo &info, bool followForward, const llvm::Twine &name)
LValue EmitDeclRefLValue(const DeclRefExpr *E)
llvm::Value * EmitLoadOfScalar(LValue lvalue, SourceLocation Loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Same as MakeAddrLValue above except that the pointer is known to be unsigned.
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
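An illustrative call, assuming a CodeGenFunction member with hypothetical Dest/Src addresses holding distinct objects of record type RecTy.
  LValue DstLV = MakeAddrLValue(Dest, RecTy);
  LValue SrcLV = MakeAddrLValue(Src, RecTy);
  // The two objects are known not to alias, so the copy may be lowered to a
  // plain memcpy of the full object size.
  EmitAggregateCopy(DstLV, SrcLV, RecTy, AggValueSlot::DoesNotOverlap);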
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
void EmitSEHTryStmt(const SEHTryStmt &S)
void maybeCreateMCDCCondBitmap()
Allocate a temp value on the stack that MCDC can use to track condition results.
void EmitOMPInteropDirective(const OMPInteropDirective &S)
llvm::Value * EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty, bool negateForRightShift)
void ExitSEHTryStmt(const SEHTryStmt &S)
llvm::Constant * GenerateCopyHelperFunction(const CGBlockInfo &blockInfo)
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind)
Emit final update of reduction values to original variables at the end of the directive.
llvm::Value * unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub)
Call unatexit() with function dtorStub.
llvm::BasicBlock * OMPScanDispatch
llvm::BasicBlock * getTerminateLandingPad()
getTerminateLandingPad - Return a landing pad that just calls terminate.
llvm::BasicBlock * getTerminateHandler()
getTerminateHandler - Return a handler (not a landing pad, just a catch handler) that just calls term...
void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr)
llvm::Value * EmitSVEMovl(const SVETypeFlags &TypeFlags, llvm::ArrayRef< llvm::Value * > Ops, unsigned BuiltinID)
llvm::function_ref< std::pair< LValue, LValue >(CodeGenFunction &, const OMPExecutableDirective &S)> CodeGenLoopBoundsTy
llvm::Value * EmitARCRetainAutoreleaseScalarExpr(const Expr *expr)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
const TargetCodeGenInfo & getTargetHooks() const
void setBeforeOutermostConditional(llvm::Value *value, Address addr, CodeGenFunction &CGF)
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
LValue EmitPredefinedLValue(const PredefinedExpr *E)
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase, bool Delegating, CallArgList &Args)
Emit a call to an inheriting constructor (that is, one that invokes a constructor inherited from a ba...
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
llvm::Type * getEltType(const SVETypeFlags &TypeFlags)
CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false)
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, OMPTargetDataInfo &InputInfo)
void EmitDeclStmt(const DeclStmt &S)
void EmitOMPScopeDirective(const OMPScopeDirective &S)
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
bool InNoInlineAttributedStmt
True if the current statement has the noinline attribute.
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock=false)
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor)
llvm::Function * GenerateBlockFunction(GlobalDecl GD, const CGBlockInfo &Info, const DeclMapTy &ldm, bool IsLambdaConversionToBlock, bool BuildGlobalBlock)
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
static void EmitOMPTargetParallelForDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForDirective &S)
Emit device code for the target parallel for directive.
llvm::Value * EmitSVEPMull(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID)
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
void EmitCoroutineBody(const CoroutineBodyStmt &S)
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, Address This, CallArgList &Args, AggValueSlot::Overlap_t Overlap, SourceLocation Loc, bool NewPointerIsChecked, llvm::CallBase **CallOrInvoke=nullptr)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy, QualType RTy)
void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
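A small sketch of a scalar copy driven through the LValue machinery, so alignment, TBAA and value conversions are handled uniformly; Src, Dst (Address), Ty and Loc are hypothetical, and the code is assumed to sit inside a CodeGenFunction member.
  LValue SrcLV = MakeAddrLValue(Src, Ty);
  LValue DstLV = MakeAddrLValue(Dst, Ty);
  llvm::Value *V = EmitLoadOfScalar(SrcLV, Loc);   // load + from-memory conversion
  EmitStoreOfScalar(V, DstLV, /*isInit=*/false);   // to-memory conversion + store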
RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside a preserved access index region.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind, RawAddress ActiveFlag, As... A)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub)
Call atexit() with function dtorStub.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitOMPSimdInit(const OMPLoopDirective &D)
Helpers for the OpenMP loop directives.
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow-path cross-DSO CFI check which calls __cfi_slowpath if Cond is false.
llvm::Value * EmitSVEScatterStore(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
void EmitConstructorBody(FunctionArgList &Args)
void SetFastMathFlags(FPOptions FPFeatures)
Set the codegen fast-math flags.
int ExpectedOMPLoopDepth
Number of nested loops to be consumed by the last surrounding loop-associated directive.
void EmitVarDecl(const VarDecl &D)
EmitVarDecl - Emit a local variable declaration.
llvm::Value * EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value)
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD, NestedNameSpecifier *Qual, llvm::Type *Ty)
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This)
Emit assumption loads for all bases.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
CGCallee EmitCallee(const Expr *E)
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
llvm::Constant * GenerateObjCAtomicSetterCopyHelperFunction(const ObjCPropertyImplDecl *PID)
ComplexPairTy EmitUnPromotedValue(ComplexPairTy result, QualType PromotionType)
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeSimdDirective &S)
Emit device code for the target teams distribute simd directive.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD)
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
SmallVector< llvm::CanonicalLoopInfo *, 4 > OMPLoopNestStack
List of recently emitted OMPCanonicalLoops.
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB, const CodeGenLoopBoundsTy &CodeGenLoopBounds, const CodeGenDispatchBoundsTy &CGDispatchBounds)
Emit code for the worksharing loop-based directive.
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
llvm::Value * LoadCXXVTT()
LoadCXXVTT - Load the VTT parameter passed to base constructors/destructors that have virtual bases.
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
void EmitOMPLinearClause(const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope)
Emit initial code for linear clauses.
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
void StartThunk(llvm::Function *Fn, GlobalDecl GD, const CGFunctionInfo &FnInfo, bool IsUnprototyped)
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and return ...
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
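A short sketch, inside a CodeGenFunction member, of why gotos are not emitted as plain branches; TheLabel is a hypothetical LabelDecl*.
  // A goto may leave scopes with pending cleanups, so the edge is routed
  // through the cleanup machinery instead of a direct EmitBranch().
  JumpDest Dest = getJumpDestForLabel(TheLabel);
  if (isObviouslyBranchWithoutCleanups(Dest))
    EmitBranch(Dest.getBlock());       // fast path: nothing to run on the way
  else
    EmitBranchThroughCleanup(Dest);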
void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr)
LValue EmitMemberExpr(const MemberExpr *E)
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
Address GetAddressOfDirectBaseInCompleteClass(Address Value, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual)
GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a complete class to the given direct b...
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
llvm::Value * SEHInfo
Value returned by __exception_info intrinsic.
llvm::Value * BuildVector(ArrayRef< llvm::Value * > Ops)
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
ConstantEmission tryEmitAsConstant(const MemberExpr *ME)
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void callCStructMoveAssignmentOperator(LValue Dst, LValue Src)
void EmitAutoVarCleanups(const AutoVarEmission &emission)
llvm::GlobalVariable * AddInitializerToStaticVarDecl(const VarDecl &D, llvm::GlobalVariable *GV)
AddInitializerToStaticVarDecl - Add the initializer for 'D' to the global variable that has already b...
llvm::Value * EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
void EmitOMPTileDirective(const OMPTileDirective &S)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
JumpDest getJumpDestForLabel(const LabelDecl *S)
getJumpDestForLabel - Return the JumpDest for the LLVM basic block that the specified label maps to.
bool EmitOMPLinearClauseInit(const OMPLoopDirective &D)
Emit initial code for linear variables.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
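A hypothetical helper tying these destruction queries together; this is not the actual lowering, just one plausible use of the listed API.
  // Destroy a local of type Ty at Addr right now, but only if the type has a
  // non-trivial destruction kind; array destruction gets an EH cleanup only
  // when the destruction kind requires one.
  static void destroyIfNeeded(CodeGenFunction &CGF, Address Addr, QualType Ty) {
    QualType::DestructionKind DK = Ty.isDestructedType();
    if (DK == QualType::DK_none)
      return;
    CGF.emitDestroy(Addr, Ty, CGF.getDestroyer(DK),
                    /*useEHCleanupForArray=*/CGF.needsEHCleanup(DK));
  }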
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
llvm::BasicBlock * EmitLandingPad()
Emits a landing pad for the current EH stack.
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD, llvm::Value *VTable, SourceLocation Loc)
If whole-program virtual table optimization is enabled, emit an assumption that VTable is a member of...
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit)
Helper for the OpenMP loop directives.
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
VlaSizePair getVLASize(QualType vla)
llvm::Value * EmitSVEPredicateTupleCast(llvm::Value *PredTuple, llvm::StructType *Ty)
llvm::Value * EmitObjCMRRAutoreleasePoolPush()
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size)
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Update the MC/DC test vector bitmap once the boolean expression E has been fully evaluated.
void EmitOMPLinearClauseFinal(const OMPLoopDirective &D, const llvm::function_ref< llvm::Value *(CodeGenFunction &)> CondGen)
Emit final code for linear clauses.
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
Address GetAddrOfBlockDecl(const VarDecl *var)
CodeGenTypes & getTypes() const
void EmitWritebacks(const CallArgList &Args)
EmitWritebacks - Emit the writebacks recorded for the given call arguments.
void EmitARCInitWeak(Address addr, llvm::Value *value)
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
llvm::BasicBlock * OMPScanExitBlock
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForSimdDirective &S)
Emit device code for the target teams distribute parallel for simd directive.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs={})
llvm::Value * EmitRISCVCpuIs(StringRef CPUStr)
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
bool InAlwaysInlineAttributedStmt
True if the current statement has the always_inline attribute.
RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr, RawAddress *Alloca=nullptr)
void EmitOMPUseDeviceAddrClause(const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope, const llvm::DenseMap< const ValueDecl *, llvm::Value * > CaptureDeviceAddrMap)
void generateObjCSetterBody(const ObjCImplementationDecl *classImpl, const ObjCPropertyImplDecl *propImpl, llvm::Constant *AtomicHelperFn)
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
llvm::Value * EmitSMEReadWrite(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::Value * EmitSMELd1St1(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Saved parameter declarations for the coroutine.
void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
ActivateCleanupBlock - Activates an initially-inactive cleanup.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, size_t OldLifetimeExtendedStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added,...
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D, llvm::GlobalVariable *Addr, bool PerformInit)
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
LValue EmitStringLiteralLValue(const StringLiteral *E)
void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt, bool IsFilter)
Scan the outlined statement for captures from the parent function.
static Destroyer destroyARCStrongPrecise
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type, FunctionArgList &Args)
RawAddress NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc, llvm::AtomicOrdering AO, bool IsVolatile=false, AggValueSlot slot=AggValueSlot::ignored())
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
llvm::Value * EmitSVEStructLoad(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD, CallArgList &CallArgs)
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
LValue EmitOMPSharedLValue(const Expr *E)
Emits the lvalue for an expression that may refer to a captured variable.
void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D, llvm::Value *Address)
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
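An illustrative call, assumed inside a CodeGenFunction member, with hypothetical ElemTy, BasePtr, Idx and Loc for an ordinary array index.
  // &BasePtr[Idx], with a UBSan pointer-overflow check on the arithmetic when
  // -fsanitize=pointer-overflow is enabled.
  llvm::Value *EltPtr =
      EmitCheckedInBoundsGEP(ElemTy, BasePtr, {Idx},
                             /*SignedIndices=*/true, /*IsSubtraction=*/false,
                             Loc, "arrayidx");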
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
void processInReduction(const OMPExecutableDirective &S, OMPTaskDataTy &Data, CodeGenFunction &CGF, const CapturedStmt *CS, OMPPrivateScope &Scope)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters.
void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
void pushStackRestore(CleanupKind kind, Address SPMem)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
llvm::Value * EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt, llvm::Type *Ty, bool usgn, const char *name)
void GenerateObjCSetter(ObjCImplementationDecl *IMP, const ObjCPropertyImplDecl *PID)
GenerateObjCSetter - Synthesize an Objective-C property setter function for the given property.
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
bool EmitOMPCopyinClause(const OMPExecutableDirective &D)
Emit code for the copyin clause of directive D.
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
SmallVector< llvm::Type *, 2 > getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType, ArrayRef< llvm::Value * > Ops)
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
void EmitOMPPrivateClause(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
llvm::Function * GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S, SourceLocation Loc)
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isNontemporal=false)
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr)
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitBreakStmt(const BreakStmt &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
static void EmitOMPTargetParallelGenericLoopDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelGenericLoopDirective &S)
Emit device code for the target parallel loop directive.
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::function_ref< void(CodeGenFunction &, SourceLocation, const unsigned, const bool)> CodeGenOrderedTy
llvm::Value * EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
void GenerateObjCGetter(ObjCImplementationDecl *IMP, const ObjCPropertyImplDecl *PID)
GenerateObjCGetter - Synthesize an Objective-C property getter function.
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
const CGFunctionInfo * CurFnInfo
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
llvm::Value * EmitSVEStructStore(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::BasicBlock * getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope)
void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr)
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
void pushKmpcAllocFree(CleanupKind Kind, std::pair< llvm::Value *, llvm::Value * > AddrSizePair)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
llvm::Value * EmitSEHAbnormalTermination()
void EmitCoreturnStmt(const CoreturnStmt &S)
void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type)
EnterDtorCleanups - Enter the cleanups necessary to complete the given phase of destruction for a des...
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S)
llvm::BasicBlock * OMPAfterScanBlock
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
void EmitOMPErrorDirective(const OMPErrorDirective &S)
static Destroyer destroyARCStrongImprecise
void EmitOMPSectionDirective(const OMPSectionDirective &S)
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
static void EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelDirective &S)
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, applying relevant metadata when applicable.
llvm::Value * EmitSVEAllTruePred(const SVETypeFlags &TypeFlags)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock=false)
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
llvm::Value * EmitObjCAlloc(llvm::Value *value, llvm::Type *returnType)
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
LValue EmitStmtExprLValue(const StmtExpr *E)
llvm::Instruction * CurrentFuncletPad
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
llvm::Type * SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags)
SVEBuiltinMemEltTy - Returns the memory element type for this memory access builtin.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void ResolveBranchFixups(llvm::BasicBlock *Target)
void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst, const CallExpr *E)
bool checkIfFunctionMustProgress()
Returns true if a function must make progress, which means the mustprogress attribute can be added.
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo)
Emits the alloca and debug information for the size expressions for each dimension of an array.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
llvm::SmallVector< VPtr, 4 > VPtrsVector
llvm::Value * EmitSMEZero(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
llvm::Value * getSelectorFromSlot()
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
Emit initial code for lastprivate variables.
static std::string getNonTrivialCopyConstructorStr(QualType QT, CharUnits Alignment, bool IsVolatile, ASTContext &Ctx)
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl)
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
llvm::Value * EmitCommonNeonBuiltinExpr(unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic, const char *NameHint, unsigned Modifier, const CallExpr *E, SmallVectorImpl< llvm::Value * > &Ops, Address PtrOp0, Address PtrOp1, llvm::Triple::ArchType Arch)
void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase, CharUnits OffsetFromNearestVBase, bool BaseIsNonVirtualPrimaryBase, const CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs)
llvm::function_ref< void(CodeGenFunction &, const OMPLoopDirective &, JumpDest)> CodeGenLoopTy
void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags, bool CanThrow)
llvm::Value * EmitNeonCall(llvm::Function *F, SmallVectorImpl< llvm::Value * > &O, const char *name, unsigned shift=0, bool rightshift=false)
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
void InitializeVTablePointer(const VPtr &vptr)
Initialize the vtable pointer of the given subobject.
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
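As a hedged illustration of where this helper comes into play, an annotate attribute in source is lowered to an annotation intrinsic call; the variable and string below are made up for the example.
// Hedged sketch: clang emits an llvm.var.annotation call for the annotated
// local, carrying the "my_tag" string and the source location.
int annotated_local() {
  int value __attribute__((annotate("my_tag"))) = 42;
  return value;
}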
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Value *Chain=nullptr, llvm::CallBase **CallOrInvoke=nullptr, CGFunctionInfo const **ResolvedFnInfo=nullptr)
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitAsmStmt(const AsmStmt &S)
Address emitAddrOfRealComponent(Address complex, QualType complexType)
void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
llvm::ScalableVectorType * getSVEPredType(const SVETypeFlags &TypeFlags)
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
llvm::Value * getExceptionFromSlot()
Returns the contents of the function's exception object and selector slots.
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
llvm::Value * EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef< Address > args, const Twine &name="")
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
bool hasLabelBeenSeenInCurrentScope() const
Return true if a label was seen in the current scope.
RValue EmitAtomicExpr(AtomicExpr *E)
RValue EmitCXXDestructorCall(GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy, llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &Schema, llvm::Value *StorageAddress, GlobalDecl SchemaDecl, QualType SchemaType)
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit, llvm::BasicBlock *InitBlock, llvm::BasicBlock *NoInitBlock, GuardKind Kind, const VarDecl *D)
Emit a branch to select whether or not to perform guarded initialization.
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
LValue EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy, AlignmentSource Source=AlignmentSource::Type)
std::pair< bool, RValue > EmitOMPAtomicSimpleUpdateExpr(LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, llvm::AtomicOrdering AO, SourceLocation Loc, const llvm::function_ref< RValue(RValue)> CommonGen)
Emit atomic update code for constructs: X = X BO E or X = E BO X.
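A hedged source-level example of the first form, X = X BO E (compile with -fopenmp; the function name is illustrative only).
// Hedged illustration: an '#pragma omp atomic update' whose statement has
// the shape X = X BO E, with BO being '+'.
void add_to(int &x, int e) {
#pragma omp atomic update
  x = x + e;
}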
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr, llvm::FunctionCallee Callee)
Emit a musttail call for a thunk with a potentially adjusted this pointer.
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
void pushDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
llvm::Value * EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags, ArrayRef< llvm::Value * > Ops)
llvm::Type * ConvertType(const TypeDecl *T)
This class organizes the cross-function state that is used while generating LLVM code.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type)
const llvm::DataLayout & getDataLayout() const
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
Definition: CodeGenTypes.h:54
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:294
static ConstantAddress invalid()
Definition: Address.h:302
DominatingValue< Address >::saved_type AggregateAddr
static saved_type save(CodeGenFunction &CGF, RValue value)
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:141
ConditionalCleanup stores the saved form of its parameters, then restores them and performs the clean...
Definition: EHScopeStack.h:203
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Definition: EHScopeStack.h:94
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:370
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:398
void pushCleanupTuple(CleanupKind Kind, std::tuple< As... > A)
Push a lazily-created cleanup on the stack. Tuple version.
Definition: EHScopeStack.h:295
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:382
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
CharUnits getAlignment() const
Definition: CGValue.h:343
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:432
QualType getType() const
Definition: CGValue.h:291
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
A stack of loop information corresponding to loop nesting levels.
Definition: CGLoopInfo.h:204
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
An abstract representation of an aligned address.
Definition: Address.h:42
static RawAddress invalid()
Definition: Address.h:61
bool isValid() const
Definition: Address.h:62
Class provides a way to call simple version of codegen for OpenMP region, or an advanced with possibl...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:386
TargetCodeGenInfo - This class organizes various target-specific code-generation issues,...
Definition: TargetInfo.h:47
The class detects jumps which bypass local variable declarations: goto L; int a; L:
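A hedged, self-contained example of that pattern; the jump is well-formed in C and C++ because 'a' has trivial type and no initializer, yet its declaration is bypassed.
// Hedged illustration of a jump that bypasses a local variable declaration.
void f(bool skip) {
  if (skip)
    goto L;   // bypasses the declaration of 'a'
  int a;      // trivial type, no initializer, so the jump is legal
  a = 0;
L:
  return;
}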
CompoundAssignOperator - For compound assignments (e.g.
Definition: Expr.h:4171
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3477
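A hedged example of the C99 construct this node models (clang also accepts it in C++ as an extension); 'struct point' is a made-up type for illustration.
// Hedged illustration: '(struct point){0, 0}' is a CompoundLiteralExpr.
struct point { int x, y; };
struct point origin(void) {
  return (struct point){0, 0};
}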
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1628
ContinueStmt - This represents a continue.
Definition: Stmt.h:2977
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition: StmtCXX.h:473
Represents the body of a coroutine.
Definition: StmtCXX.h:320
Represents an expression that might suspend coroutine execution; either a co_await or co_yield expres...
Definition: ExprCXX.h:5077
Represents a 'co_yield' expression.
Definition: ExprCXX.h:5272
Represents the current source location and context used to determine the value of the source location...
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2369
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition: Stmt.h:1519
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition: Stmt.h:2752
This represents one expression.
Definition: Expr.h:110
QualType getType() const
Definition: Expr.h:142
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6354
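A hedged example using clang's ext_vector_type extension; the '.x' and '.w' accesses are the element expressions this node represents, and the typedef name is illustrative.
// Hedged illustration of OpenCL-style element access on an extended vector.
typedef float float4 __attribute__((ext_vector_type(4)));
float ends(float4 v) {
  return v.x + v.w;   // each member access is an ExtVectorElementExpr
}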
Represents a member of a struct/union/class.
Definition: Decl.h:3033
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition: Stmt.h:2808
Represents a function declaration or definition.
Definition: Decl.h:1935
Represents a prototype with parameter type info, e.g.
Definition: Type.h:5107
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2889
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition: Expr.h:7152
IfStmt - This represents an if/then/else.
Definition: Stmt.h:2165
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2928
Describes a C or C++ initializer list.
Definition: Expr.h:5088
Represents the declaration of a label.
Definition: Decl.h:503
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:2058
FPExceptionModeKind
Possible floating point exception behavior.
Definition: LangOptions.h:287
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:499
Represents a point when we exit a loop.
Definition: ProgramPoint.h:711
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4734
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2796
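A hedged example of the extension; building it requires clang's -fenable-matrix flag, and the typedef name is illustrative only.
// Hedged illustration: m[1][2] is a MatrixSubscriptExpr on a matrix_type value.
typedef float m4x4 __attribute__((matrix_type(4, 4)));
float pick(m4x4 m) {
  return m[1][2];
}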
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3236
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3519
Represents a C++ nested name specifier, such as "::std::vector<int>::".
This represents '#pragma omp atomic' directive.
Definition: StmtOpenMP.h:2947
This represents '#pragma omp barrier' directive.
Definition: StmtOpenMP.h:2625
This represents '#pragma omp cancel' directive.
Definition: StmtOpenMP.h:3655
This represents '#pragma omp cancellation point' directive.
Definition: StmtOpenMP.h:3597
Representation of an OpenMP canonical loop.
Definition: StmtOpenMP.h:142
This represents '#pragma omp critical' directive.
Definition: StmtOpenMP.h:2076
This represents '#pragma omp depobj' directive.
Definition: StmtOpenMP.h:2841
This represents '#pragma omp distribute' directive.
Definition: StmtOpenMP.h:4425
This represents '#pragma omp distribute parallel for' composite directive.
Definition: StmtOpenMP.h:4547
This represents '#pragma omp distribute parallel for simd' composite directive.
Definition: StmtOpenMP.h:4643
This represents '#pragma omp distribute simd' composite directive.
Definition: StmtOpenMP.h:4708
This represents '#pragma omp error' directive.
Definition: StmtOpenMP.h:6432
This is a basic class for representing single OpenMP executable directive.
Definition: StmtOpenMP.h:266
This represents '#pragma omp flush' directive.
Definition: StmtOpenMP.h:2789
This represents '#pragma omp for' directive.
Definition: StmtOpenMP.h:1634
This represents '#pragma omp for simd' directive.
Definition: StmtOpenMP.h:1724
This represents '#pragma omp loop' directive.
Definition: StmtOpenMP.h:6103
Represents the '#pragma omp interchange' loop transformation directive.
Definition: StmtOpenMP.h:5769
This represents '#pragma omp interop' directive.
Definition: StmtOpenMP.h:5895
This is a common base class for loop directives ('omp simd', 'omp for', 'omp for simd' etc....
Definition: StmtOpenMP.h:1004
This represents '#pragma omp masked' directive.
Definition: StmtOpenMP.h:6013
This represents '#pragma omp masked taskloop' directive.
Definition: StmtOpenMP.h:3930
This represents '#pragma omp masked taskloop simd' directive.
Definition: StmtOpenMP.h:4071
This represents '#pragma omp master' directive.
Definition: StmtOpenMP.h:2028
This represents '#pragma omp master taskloop' directive.
Definition: StmtOpenMP.h:3854
This represents '#pragma omp master taskloop simd' directive.
Definition: StmtOpenMP.h:4006
This represents '#pragma omp metadirective' directive.
Definition: StmtOpenMP.h:6064
This represents '#pragma omp ordered' directive.
Definition: StmtOpenMP.h:2893
This represents '#pragma omp parallel' directive.
Definition: StmtOpenMP.h:612
This represents '#pragma omp parallel for' directive.
Definition: StmtOpenMP.h:2147
This represents '#pragma omp parallel for simd' directive.
Definition: StmtOpenMP.h:2244
This represents '#pragma omp parallel masked' directive.
Definition: StmtOpenMP.h:2372
This represents '#pragma omp parallel masked taskloop' directive.
Definition: StmtOpenMP.h:4215
This represents '#pragma omp parallel master' directive.
Definition: StmtOpenMP.h:2309
This represents '#pragma omp parallel master taskloop' directive.
Definition: StmtOpenMP.h:4137
This represents '#pragma omp parallel master taskloop simd' directive.
Definition: StmtOpenMP.h:4293
This represents '#pragma omp parallel sections' directive.
Definition: StmtOpenMP.h:2436
Represents the '#pragma omp reverse' loop transformation directive.
Definition: StmtOpenMP.h:5704
This represents '#pragma omp scan' directive.
Definition: StmtOpenMP.h:5842
This represents '#pragma omp scope' directive.
Definition: StmtOpenMP.h:1925
This represents '#pragma omp section' directive.
Definition: StmtOpenMP.h:1864
This represents '#pragma omp sections' directive.
Definition: StmtOpenMP.h:1787
This represents '#pragma omp simd' directive.
Definition: StmtOpenMP.h:1571
This represents '#pragma omp single' directive.
Definition: StmtOpenMP.h:1977
This represents '#pragma omp target data' directive.
Definition: StmtOpenMP.h:3206
This represents '#pragma omp target' directive.
Definition: StmtOpenMP.h:3152
This represents '#pragma omp target enter data' directive.
Definition: StmtOpenMP.h:3260
This represents '#pragma omp target exit data' directive.
Definition: StmtOpenMP.h:3315
This represents '#pragma omp target parallel' directive.
Definition: StmtOpenMP.h:3369
This represents '#pragma omp target parallel for' directive.
Definition: StmtOpenMP.h:3449
This represents '#pragma omp target parallel for simd' directive.
Definition: StmtOpenMP.h:4774
This represents '#pragma omp target parallel loop' directive.
Definition: StmtOpenMP.h:6370
This represents '#pragma omp target simd' directive.
Definition: StmtOpenMP.h:4841
This represents '#pragma omp target teams' directive.
Definition: StmtOpenMP.h:5199
This represents '#pragma omp target teams distribute' combined directive.
Definition: StmtOpenMP.h:5255
This represents '#pragma omp target teams distribute parallel for' combined directive.
Definition: StmtOpenMP.h:5322
This represents '#pragma omp target teams distribute parallel for simd' combined directive.
Definition: StmtOpenMP.h:5420
This represents '#pragma omp target teams distribute simd' combined directive.
Definition: StmtOpenMP.h:5490
This represents '#pragma omp target teams loop' directive.
Definition: StmtOpenMP.h:6230
This represents '#pragma omp target update' directive.
Definition: StmtOpenMP.h:4491
This represents '#pragma omp task' directive.
Definition: StmtOpenMP.h:2517
This represents '#pragma omp taskloop' directive.
Definition: StmtOpenMP.h:3715
This represents '#pragma omp taskloop simd' directive.
Definition: StmtOpenMP.h:3788
This represents '#pragma omp taskgroup' directive.
Definition: StmtOpenMP.h:2722
This represents '#pragma omp taskwait' directive.
Definition: StmtOpenMP.h:2671
This represents '#pragma omp taskyield' directive.
Definition: StmtOpenMP.h:2579
This represents '#pragma omp teams' directive.
Definition: StmtOpenMP.h:3544
This represents '#pragma omp teams distribute' directive.
Definition: StmtOpenMP.h:4906
This represents '#pragma omp teams distribute parallel for' composite directive.
Definition: StmtOpenMP.h:5106
This represents '#pragma omp teams distribute parallel for simd' composite directive.
Definition: StmtOpenMP.h:5040
This represents '#pragma omp teams distribute simd' combined directive.
Definition: StmtOpenMP.h:4972
This represents '#pragma omp teams loop' directive.
Definition: StmtOpenMP.h:6165
This represents the '#pragma omp tile' loop transformation directive.
Definition: StmtOpenMP.h:5548
This represents the '#pragma omp unroll' loop transformation directive.
Definition: StmtOpenMP.h:5630
This represents clause 'use_device_addr' in the '#pragma omp ...' directives.
This represents clause 'use_device_ptr' in the '#pragma omp ...' directives.
ObjCArrayLiteral - used for objective-c array containers; as in: @["Hello", NSApp,...
Definition: ExprObjC.h:191
Represents Objective-C's @synchronized statement.
Definition: StmtObjC.h:303
Represents Objective-C's @throw statement.
Definition: StmtObjC.h:358
Represents Objective-C's @try ... @catch ... @finally statement.
Definition: StmtObjC.h:167
Represents Objective-C's @autoreleasepool Statement.
Definition: StmtObjC.h:394
ObjCBoxedExpr - used for generalized expression boxing.
Definition: ExprObjC.h:127
ObjCContainerDecl - Represents a container for method declarations.
Definition: DeclObjC.h:947
ObjCDictionaryLiteral - AST node to represent objective-c dictionary literals; as in:"name" : NSUserN...
Definition: ExprObjC.h:309
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:410
Represents Objective-C's collection statement.
Definition: StmtObjC.h:23
ObjCImplementationDecl - Represents a class definition - this is where method definitions are specifi...
Definition: DeclObjC.h:2596
Represents an ObjC class declaration.
Definition: DeclObjC.h:1153
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition: ExprObjC.h:1487
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1951
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:941
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:140
ObjCPropertyImplDecl - Represents implementation declaration of a property in a class or category imp...
Definition: DeclObjC.h:2804
ObjCProtocolExpr used for protocol expression in Objective-C.
Definition: ExprObjC.h:505
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
ObjCStringLiteral, used for Objective-C string literals i.e.
Definition: ExprObjC.h:51
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1173
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1223
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
Definition: StmtOpenACC.h:131
This class represents a 'loop' construct.
Definition: StmtOpenACC.h:194
Represents a parameter to a function.
Definition: Decl.h:1725
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3198
[C99 6.4.2.2] - A predefined identifier such as func.
Definition: Expr.h:1991
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6546
A (possibly-)qualified type.
Definition: Type.h:929
@ DK_cxx_destructor
Definition: Type.h:1521
@ DK_nontrivial_c_struct
Definition: Type.h:1524
@ DK_objc_weak_lifetime
Definition: Type.h:1523
@ DK_objc_strong_lifetime
Definition: Type.h:1522
The collection of all-type qualifiers we support.
Definition: Type.h:324
Represents a struct/union/class.
Definition: Decl.h:4162
bool hasVolatileMember() const
Definition: Decl.h:4225
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6077
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition: Stmt.h:3046
Represents a __leave statement.
Definition: Stmt.h:3745
Flags to identify the types for overloaded SVE builtins.
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
A trivial tuple used to represent a source range.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4466
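A hedged example of the GNU extension (accepted by clang and GCC): the value of the last expression in the braced block becomes the value of the whole ({ ... }).
// Hedged illustration: the ({ ... }) below is a StmtExpr.
int twice(int v) {
  return ({ int X = v; X + X; });
}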
Stmt - This represents one statement.
Definition: Stmt.h:84
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1323
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1325
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
SwitchStmt - This represents a 'switch' stmt.
Definition: Stmt.h:2415
Exposes information about the current target.
Definition: TargetInfo.h:220
Represents a declaration of a type.
Definition: Decl.h:3384
The base class of the type hierarchy.
Definition: Type.h:1828
bool isReferenceType() const
Definition: Type.h:8209
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:8736
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2232
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4750
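A hedged example; in clang the va_arg macro from <stdarg.h> expands to __builtin_va_arg, so each use below is modeled as a VAArgExpr. The function is made up for illustration.
#include <stdarg.h>

// Hedged illustration: sums 'n' trailing int arguments.
int sum_ints(int n, ...) {
  va_list ap;
  va_start(ap, n);
  int total = 0;
  for (int i = 0; i < n; ++i)
    total += va_arg(ap, int);   // one VAArgExpr per use
  va_end(ap);
  return total;
}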
Represents the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition: Decl.h:671
QualType getType() const
Definition: Decl.h:682
Represents a variable declaration or definition.
Definition: Decl.h:882
VarDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition: Decl.cpp:2246
bool isLocalVarDeclOrParm() const
Similar to isLocalVarDecl but also includes parameters.
Definition: Decl.h:1213
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3808
Expr * getSizeExpr() const
Definition: Type.h:3827
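A hedged C99 example (clang also accepts VLAs in C++ as an extension); the parameter 'n' is the size expression that getSizeExpr() would return for 'buf'.
// Hedged illustration: 'buf' has VariableArrayType because its size is not
// an integer constant expression.
void zero_out(int n) {
  int buf[n];
  for (int i = 0; i < n; ++i)
    buf[i] = 0;
}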
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2611
Defines the clang::TargetInfo interface.
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:141
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:84
ARCPreciseLifetime_t
Does an ARC strong l-value have precise lifetime?
Definition: CGValue.h:135
@ NotKnownNonNull
Definition: Address.h:33
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const AstTypeMatcher< ComplexType > complexType
Matches C99 complex types.
The JSON file list parser is used to communicate input to InstallAPI.
CXXCtorType
C++ constructor types.
Definition: ABI.h:24
llvm::omp::Directive OpenMPDirectiveKind
OpenMP directives.
Definition: OpenMPKinds.h:25
BinaryOperatorKind
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ CR_Default
Definition: CapturedStmt.h:17
OpenMPDistScheduleClauseKind
OpenMP attributes for 'dist_schedule' clause.
Definition: OpenMPKinds.h:104
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
Definition: Linkage.h:24
@ Result
The result type of a method or function.
CXXDtorType
C++ destructor types.
Definition: ABI.h:33
const FunctionProtoType * T
@ Success
Template argument deduction was successful.
llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
#define true
Definition: stdbool.h:25
#define false
Definition: stdbool.h:26
Structure with information about how a bitfield should be accessed.
llvm::SmallVector< llvm::AllocaInst * > Take()
CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
FMVResolverOption(llvm::Function *F, ArrayRef< StringRef > Feats, std::optional< StringRef > Arch=std::nullopt)
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth, unsigned Index)
Header for data within LifetimeExtendedCleanupStack.
unsigned Size
The size of the following cleanup object.
unsigned IsConditional
Whether this is a conditional cleanup.
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::OpenMPIRBuilder::InsertPointTy InsertPointTy
static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Twine RegionName)
Emit the body of an OMP region that will be outlined in OpenMPIRBuilder::finalize().
static Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable \p VD.
static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP, llvm::BasicBlock &FiniBB, llvm::Function *Fn, ArrayRef< llvm::Value * > Args)
static std::string getNameWithSeparators(ArrayRef< StringRef > Parts, StringRef FirstSeparator=".", StringRef Separator=".")
Get the platform-specific name separator.
static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP)
Emit the Finalization for an OMP region.
static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Twine RegionName)
Emit the body of an OMP region.
OMPBuilderCBHelpers & operator=(const OMPBuilderCBHelpers &)=delete
OMPBuilderCBHelpers(const OMPBuilderCBHelpers &)=delete
OMPTargetDataInfo(Address BasePointersArray, Address PointersArray, Address SizesArray, Address MappersArray, unsigned NumberOfTargetItems)
llvm::PointerUnion< const FunctionProtoType *, const ObjCMethodDecl * > P
Struct with all information about dynamic [sub]class needed to set vptr.
This structure provides a set of types that are commonly used during IR emission.
Helper class with most of the code for saving a value for a conditional expression cleanup.
static llvm::Value * restore(CodeGenFunction &CGF, saved_type value)
static saved_type save(CodeGenFunction &CGF, llvm::Value *value)
static bool needsSaving(llvm::Value *value)
Answer whether the given value needs extra work to be saved.
llvm::PointerIntPair< llvm::Value *, 1, bool > saved_type
static type restore(CodeGenFunction &CGF, saved_type value)
static type restore(CodeGenFunction &CGF, saved_type value)
static saved_type save(CodeGenFunction &CGF, type value)
static saved_type save(CodeGenFunction &CGF, type value)
static type restore(CodeGenFunction &CGF, saved_type value)
A metaprogramming class for ensuring that a value will dominate an arbitrary position in a function.
Definition: EHScopeStack.h:65
static saved_type save(CodeGenFunction &CGF, type value)
Definition: EHScopeStack.h:59
Scheduling data for loop-based OpenMP directives.
Definition: OpenMPKinds.h:180
The this pointer adjustment as well as an optional return adjustment for a thunk.
Definition: Thunk.h:157