CIRGenFunction.h
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
14#define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
15
16#include "CIRGenBuilder.h"
17#include "CIRGenCall.h"
18#include "CIRGenModule.h"
19#include "CIRGenTypeCache.h"
20#include "CIRGenValue.h"
21#include "EHScopeStack.h"
22
23#include "Address.h"
24
27#include "clang/AST/CharUnits.h"
29#include "clang/AST/Decl.h"
30#include "clang/AST/ExprCXX.h"
31#include "clang/AST/Stmt.h"
32#include "clang/AST/Type.h"
37#include "llvm/ADT/ScopedHashTable.h"
38
39namespace {
40class ScalarExprEmitter;
41} // namespace
42
43namespace mlir {
44namespace acc {
45class LoopOp;
46} // namespace acc
47} // namespace mlir
48
49namespace clang::CIRGen {
50
51struct CGCoroData;
52
53class CIRGenFunction : public CIRGenTypeCache {
54public:
56
57private:
58 friend class ::ScalarExprEmitter;
59 /// The builder is a helper class to create IR inside a function. The
60 /// builder is stateful, in particular it keeps an "insertion point": this
61 /// is where the next operations will be introduced.
62 CIRGenBuilderTy &builder;
63
64 /// A jump destination is an abstract label, branching to which may
65 /// require a jump out through normal cleanups.
66 struct JumpDest {
67 JumpDest() = default;
68 JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {},
69 unsigned index = 0)
70 : block(block) {}
71
72 bool isValid() const { return block != nullptr; }
73 mlir::Block *getBlock() const { return block; }
74 EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; }
75 unsigned getDestIndex() const { return index; }
76
77 // This should be used cautiously.
78 void setScopeDepth(EHScopeStack::stable_iterator depth) {
79 scopeDepth = depth;
80 }
81
82 private:
83 mlir::Block *block = nullptr;
84 EHScopeStack::stable_iterator scopeDepth;
85 unsigned index;
86 };
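
  // Illustrative usage sketch (not part of the original header): inside a
  // CIRGenFunction member, a JumpDest is typically obtained through
  // getJumpDestInCurrentScope() and later consumed by emitBranchThroughCleanup()
  // when a jump may have to leave through pending cleanups. `retBlock` and
  // `loc` are assumed to be in scope:
  //
  //   JumpDest dest = getJumpDestInCurrentScope(retBlock);
  //   if (dest.isValid())
  //     emitBranchThroughCleanup(loc, dest);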
87
88public:
89 /// The GlobalDecl for the current function being compiled or the global
90 /// variable currently being initialized.
92
93 /// Unified return block.
94 /// In CIR this is a function because each scope might have
95 /// its associated return block.
96 JumpDest returnBlock(mlir::Block *retBlock) {
97 return getJumpDestInCurrentScope(retBlock);
98 }
99
101
102 /// The compiler-generated variable that holds the return value.
103 std::optional<mlir::Value> fnRetAlloca;
104
105 // Holds coroutine data if the current function is a coroutine. We use a
106 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
107 // in this header.
108 struct CGCoroInfo {
109 std::unique_ptr<CGCoroData> data;
110 CGCoroInfo();
111 ~CGCoroInfo();
112 };
114
115 bool isCoroutine() const { return curCoro.data != nullptr; }
116
117 /// The temporary alloca to hold the return value. This is
118 /// invalid iff the function has no return value.
120
121 /// Tracks function scope overall cleanup handling.
123
125
126 /// A mapping from NRVO variables to the flags used to indicate
127 /// when the NRVO has been applied to this variable.
128 llvm::DenseMap<const VarDecl *, mlir::Value> nrvoFlags;
129
130 llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
133
134 /// CXXThisDecl - When generating code for a C++ member function,
135 /// this will hold the implicit 'this' declaration.
137 mlir::Value cxxabiThisValue = nullptr;
138 mlir::Value cxxThisValue = nullptr;
140
141 /// When generating code for a constructor or destructor, this will hold the
142 /// implicit argument (e.g. VTT).
145
146 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within this
147 /// expression.
149
150 // Holds the Decl for the current outermost non-closure context
151 const clang::Decl *curFuncDecl = nullptr;
152 /// This is the inner-most code context, which includes blocks.
153 const clang::Decl *curCodeDecl = nullptr;
154
155 /// The current function or global initializer that code is being generated for.
156 /// This is usually a cir::FuncOp, but it can also be a cir::GlobalOp for
157 /// global initializers.
158 mlir::Operation *curFn = nullptr;
159
160 /// Save Parameter Decl for coroutine.
162
163 using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
164 /// This keeps track of the CIR allocas or globals for local C
165 /// declarations.
167
168 /// The type of the condition for the switch statement currently being emitted.
170
171 clang::ASTContext &getContext() const { return cgm.getASTContext(); }
172
173 CIRGenBuilderTy &getBuilder() { return builder; }
174
176 const CIRGenModule &getCIRGenModule() const { return cgm; }
177
179 // We currently assume this isn't called for a global initializer.
180 auto fn = mlir::cast<cir::FuncOp>(curFn);
181 return &fn.getRegion().front();
182 }
183
184 /// Sanitizers enabled for this function.
186
187 /// The symbol table maps a variable name to a value in the current scope.
188 /// Entering a function creates a new scope, and the function arguments are
189 /// added to the mapping. When the processing of a function is terminated,
190 /// the scope is destroyed and the mappings created in this scope are
191 /// dropped.
192 using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
194
195 /// Whether a cir.stacksave operation has been added. Used to avoid
196 /// inserting cir.stacksave for multiple VLAs in the same scope.
197 bool didCallStackSave = false;
198
199 /// Whether or not a Microsoft-style asm block has been processed within
200 /// this function. These can potentially set the return value.
201 bool sawAsmBlock = false;
202
203 mlir::Type convertTypeForMem(QualType t);
204
205 mlir::Type convertType(clang::QualType t);
206 mlir::Type convertType(const TypeDecl *t) {
207 return convertType(getContext().getTypeDeclType(t));
208 }
209
210 /// Get the sign-extended integer value from an mlir::Value defined by a cir::ConstantOp.
211 static int64_t getSExtIntValueFromConstOp(mlir::Value val) {
212 auto constOp = val.getDefiningOp<cir::ConstantOp>();
213 assert(constOp && "getSExtIntValueFromConstOp called with non-ConstantOp");
214 return constOp.getIntValue().getSExtValue();
215 }
216
217 /// Get the zero-extended integer value from an mlir::Value defined by a
218 /// cir::ConstantOp.
219 static int64_t getZExtIntValueFromConstOp(mlir::Value val) {
220 auto constOp = val.getDefiningOp<cir::ConstantOp>();
221 assert(constOp &&
222 "getZExtIntValueFromConstOp called with non-ConstantOp");
223 return constOp.getIntValue().getZExtValue();
224 }
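
  // Illustrative usage sketch (not part of the original header): these helpers
  // are intended for values already known to come from a cir::ConstantOp, for
  // example when folding a constant index. `idxVal` is a hypothetical
  // mlir::Value used only for illustration:
  //
  //   if (idxVal.getDefiningOp<cir::ConstantOp>()) {
  //     int64_t idx = getSExtIntValueFromConstOp(idxVal);
  //     // ... use `idx` to compute a constant offset ...
  //   }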
225
226 /// Return the cir::TypeEvaluationKind of QualType \c type.
228
232
236
238 bool suppressNewContext = false);
240
241 CIRGenTypes &getTypes() const { return cgm.getTypes(); }
242
243 const TargetInfo &getTarget() const { return cgm.getTarget(); }
244 mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }
245
247 return cgm.getTargetCIRGenInfo();
248 }
249
250 // ---------------------
251 // Opaque value handling
252 // ---------------------
253
254 /// Keeps track of the current set of opaque value expressions.
255 llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
256 llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
257
258 // This keeps track of the associated size for each VLA type.
259 // We track this by the size expression rather than the type itself because
260 // in certain situations, like a const qualifier applied to a VLA typedef,
261 // multiple VLA types can share the same size expression.
262 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
263 // enter/leave scopes.
264 llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
265
266public:
267 /// A non-RAII class containing all the information about a bound
268 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
269 /// this which makes individual mappings very simple; using this
270 /// class directly is useful when you have a variable number of
271 /// opaque values or don't want the RAII functionality for some
272 /// reason.
273 class OpaqueValueMappingData {
274 const OpaqueValueExpr *opaqueValue;
275 bool boundLValue;
276
277 OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
278 : opaqueValue(ov), boundLValue(boundLValue) {}
279
280 public:
281 OpaqueValueMappingData() : opaqueValue(nullptr) {}
282
283 static bool shouldBindAsLValue(const Expr *expr) {
284 // gl-values should be bound as l-values for obvious reasons.
285 // Records should be bound as l-values because IR generation
286 // always keeps them in memory. Expressions of function type
287 // act exactly like l-values but are formally required to be
288 // r-values in C.
289 return expr->isGLValue() || expr->getType()->isFunctionType() ||
291 }
292
294 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
295 if (shouldBindAsLValue(ov))
296 return bind(cgf, ov, cgf.emitLValue(e));
297 return bind(cgf, ov, cgf.emitAnyExpr(e));
298 }
299
301 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
302 assert(shouldBindAsLValue(ov));
303 cgf.opaqueLValues.insert(std::make_pair(ov, lv));
304 return OpaqueValueMappingData(ov, true);
305 }
306
308 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
309 assert(!shouldBindAsLValue(ov));
310 cgf.opaqueRValues.insert(std::make_pair(ov, rv));
311
312 OpaqueValueMappingData data(ov, false);
313
314 // Work around an extremely aggressive peephole optimization in
315 // EmitScalarConversion which assumes that all other uses of a
316 // value are extant.
318 return data;
319 }
320
321 bool isValid() const { return opaqueValue != nullptr; }
322 void clear() { opaqueValue = nullptr; }
323
325 assert(opaqueValue && "no data to unbind!");
326
327 if (boundLValue) {
328 cgf.opaqueLValues.erase(opaqueValue);
329 } else {
330 cgf.opaqueRValues.erase(opaqueValue);
332 }
333 }
334 };
335
336 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
338 CIRGenFunction &cgf;
340
341 public:
345
346 /// Build the opaque value mapping for the given conditional
347 /// operator if it's the GNU ?: extension. This is a common
348 /// enough pattern that the convenience operator is really
349 /// helpful.
350 ///
353 : cgf(cgf) {
354 if (mlir::isa<ConditionalOperator>(op))
355 // Leave Data empty.
356 return;
357
359 mlir::cast<BinaryConditionalOperator>(op);
361 e->getCommon());
362 }
363
364 /// Build the opaque value mapping for an OpaqueValueExpr whose source
365 /// expression is set to the expression the OVE represents.
367 : cgf(cgf) {
368 if (ov) {
369 assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
370 "for OVE with no source expression");
371 data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
372 }
373 }
374
376 LValue lvalue)
377 : cgf(cgf),
378 data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}
379
381 RValue rvalue)
382 : cgf(cgf),
383 data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}
384
385 void pop() {
386 data.unbind(cgf);
387 data.clear();
388 }
389
391 if (data.isValid())
392 data.unbind(cgf);
393 }
394 };
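
  // Illustrative usage sketch (not part of the original header): binding an
  // OpaqueValueExpr while the expression that refers to it is emitted; the
  // mapping is removed when `binding` goes out of scope. Assumes a
  // CIRGenFunction `cgf` and an OpaqueValueExpr `ove` that has a source
  // expression:
  //
  //   CIRGenFunction::OpaqueValueMapping binding(cgf, ove);
  //   LValue lv = cgf.emitOpaqueValueLValue(ove); // resolved via the mapping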
395
396private:
397 /// Declare a variable in the current scope; the variable is expected not to
398 /// have been declared in this scope yet.
399 void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
400 mlir::Location loc, clang::CharUnits alignment,
401 bool isParam = false);
402
403public:
404 mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);
405
406 void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);
407
408private:
409 // Track current variable initialization (if there's one)
410 const clang::VarDecl *currVarDecl = nullptr;
411 class VarDeclContext {
413 const clang::VarDecl *oldVal = nullptr;
414
415 public:
416 VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) {
417 if (p.currVarDecl)
418 oldVal = p.currVarDecl;
419 p.currVarDecl = value;
420 }
421
422 /// Can be used to restore the state early, before the dtor
423 /// is run.
424 void restore() { p.currVarDecl = oldVal; }
425 ~VarDeclContext() { restore(); }
426 };
427
428public:
429 /// Used to track source locations across nested visitor traversals.
430 /// Always use a `SourceLocRAIIObject` to change currSrcLoc.
431 std::optional<mlir::Location> currSrcLoc;
433 CIRGenFunction &cgf;
434 std::optional<mlir::Location> oldLoc;
435
436 public:
437 SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value) : cgf(cgf) {
438 if (cgf.currSrcLoc)
439 oldLoc = cgf.currSrcLoc;
440 cgf.currSrcLoc = value;
441 }
442
443 /// Can be used to restore the state early, before the dtor
444 /// is run.
445 void restore() { cgf.currSrcLoc = oldLoc; }
447 };
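
  // Illustrative usage sketch (not part of the original header): scoping
  // currSrcLoc so that operations emitted for a sub-expression pick up its
  // location. Assumes a clang::Expr `e` inside a CIRGenFunction member:
  //
  //   {
  //     SourceLocRAIIObject scopedLoc{*this, getLoc(e->getSourceRange())};
  //     emitIgnoredExpr(e);
  //   } // the previous currSrcLoc is restored when scopedLoc is destroyed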
448
450 llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
451
452 /// Hold counters for incrementally naming temporaries
453 unsigned counterRefTmp = 0;
454 unsigned counterAggTmp = 0;
455 std::string getCounterRefTmpAsString();
456 std::string getCounterAggTmpAsString();
457
458 /// Helpers to convert Clang's SourceLocation to a MLIR Location.
459 mlir::Location getLoc(clang::SourceLocation srcLoc);
460 mlir::Location getLoc(clang::SourceRange srcLoc);
461 mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
462
463 const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
464
465 /// True if an insertion point is defined. If not, this indicates that the
466 /// current code being emitted is unreachable.
467 /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
468 /// since we don't yet force null insertion point to designate behavior (like
469 /// LLVM's codegen does) and we probably shouldn't.
470 bool haveInsertPoint() const {
471 return builder.getInsertionBlock() != nullptr;
472 }
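
  // Illustrative usage sketch (not part of the original header): emission
  // helpers typically bail out when the current block has already been
  // terminated, e.g.:
  //
  //   if (!haveInsertPoint())
  //     return;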
473
474 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
475 // an ObjCMethodDecl.
477 llvm::PointerUnion<const clang::FunctionProtoType *,
478 const clang::ObjCMethodDecl *>
480
483 };
484
486
487 /// An abstract representation of regular/ObjC call/message targets.
489 /// The function declaration of the callee.
490 [[maybe_unused]] const clang::Decl *calleeDecl;
491
492 public:
493 AbstractCallee() : calleeDecl(nullptr) {}
494 AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {}
495
496 bool hasFunctionDecl() const {
497 return llvm::isa_and_nonnull<clang::FunctionDecl>(calleeDecl);
498 }
499
500 unsigned getNumParams() const {
501 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
502 return fd->getNumParams();
503 return llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_size();
504 }
505
506 const clang::ParmVarDecl *getParamDecl(unsigned I) const {
507 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
508 return fd->getParamDecl(I);
509 return *(llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_begin() +
510 I);
511 }
512 };
513
514 struct VlaSizePair {
515 mlir::Value numElts;
517
518 VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
519 };
520
521 /// Return the number of elements for a single dimension
522 /// for the given array type.
523 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
524
525 /// Returns an mlir::Value+QualType pair that corresponds to the size,
526 /// in non-variably-sized elements, of a variable length array type,
527 /// plus the largest non-variably-sized element type. Assumes that
528 /// the type has already been emitted with emitVariablyModifiedType.
529 VlaSizePair getVLASize(const VariableArrayType *type);
530 VlaSizePair getVLASize(QualType type);
531
533
534 mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType) {
535 return getAsNaturalAddressOf(addr, pointeeType).getBasePointer();
536 }
537
538 void finishFunction(SourceLocation endLoc);
539
540 /// Determine whether the given initializer is trivial in the sense
541 /// that it requires no code to be generated.
542 bool isTrivialInitializer(const Expr *init);
543
544 /// If the specified expression does not fold to a constant, or if it does but
545 /// contains a label, return false. If it constant folds, return true and set
546 /// the boolean result in resultBool.
547 bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
548 bool allowLabels = false);
550 llvm::APSInt &resultInt,
551 bool allowLabels = false);
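
  // Illustrative usage sketch (not part of the original header): a typical use
  // is to skip emitting a branch when the condition is a compile-time
  // constant. Assumes a clang::Expr `cond` inside a CIRGenFunction member:
  //
  //   bool condValue;
  //   if (constantFoldsToBool(cond, condValue)) {
  //     // Emit only the branch selected by `condValue` instead of a cir.if.
  //   }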
552
553 /// Return true if the statement contains a label in it. If a statement that
554 /// is not executed normally contains no label, we can simply remove its
555 /// code.
556 bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);
557
558 Address emitExtVectorElementLValue(LValue lv, mlir::Location loc);
559
560 class ConstantEmission {
561 // Cannot use mlir::TypedAttr directly here because of bit availability.
562 llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference;
563 ConstantEmission(mlir::TypedAttr c, bool isReference)
564 : valueAndIsReference(c, isReference) {}
565
566 public:
568 static ConstantEmission forReference(mlir::TypedAttr c) {
569 return ConstantEmission(c, true);
570 }
571 static ConstantEmission forValue(mlir::TypedAttr c) {
572 return ConstantEmission(c, false);
573 }
574
575 explicit operator bool() const {
576 return valueAndIsReference.getOpaqueValue() != nullptr;
577 }
578
579 bool isReference() const { return valueAndIsReference.getInt(); }
581 assert(isReference());
582 cgf.cgm.errorNYI(refExpr->getSourceRange(),
583 "ConstantEmission::getReferenceLValue");
584 return {};
585 }
586
587 mlir::TypedAttr getValue() const {
588 assert(!isReference());
589 return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer());
590 }
591 };
592
593 ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
594 ConstantEmission tryEmitAsConstant(const MemberExpr *me);
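
  // Illustrative usage sketch (not part of the original header): trying the
  // constant path before falling back to an ordinary load, inside a
  // CIRGenFunction member with a DeclRefExpr `refExpr`:
  //
  //   ConstantEmission constant = tryEmitAsConstant(refExpr);
  //   if (constant && !constant.isReference()) {
  //     mlir::TypedAttr value = constant.getValue();
  //     // ... materialize `value` instead of loading the variable ...
  //   }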
595
598 /// The address of the alloca for languages with explicit address space
599 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
600 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
601 /// as a global constant.
603
604 /// True if the variable is of aggregate type and has a constant
605 /// initializer.
607
608 /// True if the variable is a __block variable that is captured by an
609 /// escaping block.
610 bool isEscapingByRef = false;
611
612 /// True if the variable was emitted as an offload recipe, and thus doesn't
613 /// have the same sort of alloca initialization.
614 bool emittedAsOffload = false;
615
616 mlir::Value nrvoFlag{};
617
618 struct Invalid {};
620
623
625
626 bool wasEmittedAsGlobal() const { return !addr.isValid(); }
627
629
630 /// Returns the raw, allocated address, which is not necessarily
631 /// the address of the object itself. It is casted to default
632 /// address space for address space agnostic languages.
633 Address getAllocatedAddress() const { return addr; }
634
635 // Changes the stored address for the emission. This function should only
636 // be used in extreme cases, and isn't required to model normal AST
637 // initialization/variables.
639
640 /// Returns the address of the object within this declaration.
641 /// Note that this does not chase the forwarding pointer for
642 /// __block decls.
644 if (!isEscapingByRef)
645 return addr;
646
648 return Address::invalid();
649 }
650 };
651
652 /// The given basic block lies in the current EH scope, but may be a
653 /// target of a potentially scope-crossing jump; get a stable handle
654 /// to which we can perform this jump later.
655 /// CIRGen: this mostly tracks state for figuring out the proper scope
656 /// information, no actual branches are emitted.
657 JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
658 return JumpDest(target, ehStack.getInnermostNormalCleanup(),
660 }
661 /// IndirectBranch - The first time an indirect goto is seen we create a block
662 /// reserved for the indirect branch. Unlike before, the actual 'indirectbr'
663 /// is emitted at the end of the function, once all block destinations have
664 /// been resolved.
665 mlir::Block *indirectGotoBlock = nullptr;
666
669
670 /// Perform the usual unary conversions on the specified expression and
672 /// compare the result against zero, returning a boolean value.
672 mlir::Value evaluateExprAsBool(const clang::Expr *e);
673
674 cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d,
675 cir::GlobalOp gv,
676 cir::GetGlobalOp gvAddr);
677
678 /// Enter the cleanups necessary to complete the given phase of destruction
679 /// for a destructor. The end result should call destructors on members and
680 /// base classes in reverse order of their construction.
682
683 /// Determines whether an EH cleanup is required to destroy a type
684 /// with the given destruction kind.
685 /// TODO(cir): could be shared with Clang LLVM codegen
687 switch (kind) {
689 return false;
693 return getLangOpts().Exceptions;
695 return getLangOpts().Exceptions &&
696 cgm.getCodeGenOpts().ObjCAutoRefCountExceptions;
697 }
698 llvm_unreachable("bad destruction kind");
699 }
700
704
705 void pushStackRestore(CleanupKind kind, Address spMem);
706
707 /// Set the address of a local variable.
709 assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
710 localDeclMap.insert({vd, addr});
711
712 // Add to the symbol table if not there already.
713 if (symbolTable.count(vd))
714 return;
715 symbolTable.insert(vd, addr.getPointer());
716 }
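
  // Illustrative usage sketch (not part of the original header): a local
  // variable's storage is usually created with emitAlloca() and then
  // registered so later references can find it via getAddrOfLocalVar().
  // Assumes `vd`, `ty`, `loc`, and `align` are in scope in a CIRGenFunction
  // member:
  //
  //   mlir::Type memTy = convertTypeForMem(ty);
  //   mlir::Value alloca = emitAlloca(vd->getName(), memTy, loc, align,
  //                                   /*insertIntoFnEntryBlock=*/false);
  //   setAddrOfLocalVar(vd, Address(alloca, memTy, align));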
717
718 // Replaces the address of the local variable if it exists; otherwise does
719 // the same thing as setAddrOfLocalVar.
721 localDeclMap.insert_or_assign(vd, addr);
722 }
723
724 // A class to allow reverting changes to a var-decl's registration to the
725 // localDeclMap. This is used in cases where things are being inserted into
726 // the variable list but don't follow normal lookup/search rules, like in
727 // OpenACC recipe generation.
729 CIRGenFunction &cgf;
730 const VarDecl *vd;
731 bool shouldDelete = false;
732 Address oldAddr = Address::invalid();
733
734 public:
736 : cgf(cgf), vd(vd) {
737 auto mapItr = cgf.localDeclMap.find(vd);
738
739 if (mapItr != cgf.localDeclMap.end())
740 oldAddr = mapItr->second;
741 else
742 shouldDelete = true;
743 }
744
746 if (shouldDelete)
747 cgf.localDeclMap.erase(vd);
748 else
749 cgf.localDeclMap.insert_or_assign(vd, oldAddr);
750 }
751 };
752
754
757
758 static bool
760
767
770
774 const clang::CXXRecordDecl *nearestVBase,
775 clang::CharUnits offsetFromNearestVBase,
776 bool baseIsNonVirtualPrimaryBase,
777 const clang::CXXRecordDecl *vtableClass,
778 VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
779 /// Return the Value of the vtable pointer member pointed to by thisAddr.
780 mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
781 const clang::CXXRecordDecl *vtableClass);
782
783 /// Returns whether we should perform a type checked load when loading a
784 /// virtual function for virtual calls to members of RD. This is generally
785 /// true when both vcall CFI and whole-program-vtables are enabled.
787
788 /// Source location information about the default argument or member
789 /// initializer expression we're evaluating, if any.
793
794 /// A scope within which we are constructing the fields of an object which
795 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
796 /// we need to evaluate the CXXDefaultInitExpr within the evaluation.
798 public:
800 : cgf(cgf), oldCXXDefaultInitExprThis(cgf.cxxDefaultInitExprThis) {
801 cgf.cxxDefaultInitExprThis = thisAddr;
802 }
804 cgf.cxxDefaultInitExprThis = oldCXXDefaultInitExprThis;
805 }
806
807 private:
808 CIRGenFunction &cgf;
809 Address oldCXXDefaultInitExprThis;
810 };
811
812 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
813 /// is overridden to be the object under construction.
815 public:
820 cgf.cxxThisValue = cgf.cxxDefaultInitExprThis.getPointer();
821 cgf.cxxThisAlignment = cgf.cxxDefaultInitExprThis.getAlignment();
822 }
824 cgf.cxxThisValue = oldCXXThisValue;
825 cgf.cxxThisAlignment = oldCXXThisAlignment;
826 }
827
828 public:
830 mlir::Value oldCXXThisValue;
833 };
834
839
841 LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
842
843 /// Construct an address with the natural alignment of T. If a pointer to T
844 /// is expected to be signed, the pointer passed to this function must have
845 /// been signed, and the returned Address will have the pointer authentication
846 /// information needed to authenticate the signed pointer.
848 CharUnits alignment,
849 bool forPointeeType = false,
850 LValueBaseInfo *baseInfo = nullptr) {
851 if (alignment.isZero())
852 alignment = cgm.getNaturalTypeAlignment(t, baseInfo);
853 return Address(ptr, convertTypeForMem(t), alignment);
854 }
855
857 Address value, const CXXRecordDecl *derived,
858 llvm::iterator_range<CastExpr::path_const_iterator> path,
859 bool nullCheckValue, SourceLocation loc);
860
862 mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived,
863 llvm::iterator_range<CastExpr::path_const_iterator> path,
864 bool nullCheckValue);
865
866 /// Return the VTT parameter that should be passed to a base
867 /// constructor/destructor with virtual bases.
868 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
869 /// to ItaniumCXXABI.cpp together with all the references to VTT.
870 mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase,
871 bool delegating);
872
875 return makeAddrLValue(addr, ty, LValueBaseInfo(source));
876 }
877
879 return LValue::makeAddr(addr, ty, baseInfo);
880 }
881
882 void initializeVTablePointers(mlir::Location loc,
883 const clang::CXXRecordDecl *rd);
884 void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
885
887
888 /// Return the address of a local variable.
890 auto it = localDeclMap.find(vd);
891 assert(it != localDeclMap.end() &&
892 "Invalid argument to getAddrOfLocalVar(), no decl!");
893 return it->second;
894 }
895
897 mlir::Type fieldType, unsigned index);
898
899 /// Given an opaque value expression, return its LValue mapping if it exists,
900 /// otherwise create one.
902
903 /// Given an opaque value expression, return its RValue mapping if it exists,
904 /// otherwise create one.
906
907 /// Load the value for 'this'. This function is only valid while generating
908 /// code for a C++ member function.
909 /// FIXME(cir): this should return a mlir::Value!
910 mlir::Value loadCXXThis() {
911 assert(cxxThisValue && "no 'this' value for this function");
912 return cxxThisValue;
913 }
915
916 /// Load the VTT parameter to base constructors/destructors that have virtual
917 /// bases. FIXME: Every place that calls LoadCXXVTT is something that needs to
918 /// be abstracted properly.
919 mlir::Value loadCXXVTT() {
920 assert(cxxStructorImplicitParamValue && "no VTT value for this function");
922 }
923
924 /// Convert the given pointer to a complete class to the given direct base.
926 Address value,
927 const CXXRecordDecl *derived,
928 const CXXRecordDecl *base,
929 bool baseIsVirtual);
930
931 /// Determine whether a return value slot may overlap some other object.
933 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
934 // class subobjects. These cases may need to be revisited depending on the
935 // resolution of the relevant core issue.
937 }
938
939 /// Determine whether a base class initialization may overlap some other
940 /// object.
942 const CXXRecordDecl *baseRD,
943 bool isVirtual);
944
945 /// Get an appropriate 'undef' rvalue for the given type.
946 /// TODO: What's the equivalent for MLIR? Currently we're only using this for
947 /// void types so it just returns RValue::get(nullptr) but it'll need to be
948 /// addressed later.
950
951 cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
952 cir::FuncType funcType);
953
955 FunctionArgList &args);
956
957 /// Emit the function prologue: declare function arguments in the symbol
958 /// table.
959 void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB,
960 const FunctionDecl *fd, SourceLocation bodyBeginLoc);
961
962 /// Emit code for the start of a function.
963 /// \param loc The location to be associated with the function.
964 /// \param startLoc The location of the function body.
966 cir::FuncOp fn, cir::FuncType funcType,
968 clang::SourceLocation startLoc);
969
970 /// Returns true if the aggregate type has a volatile member.
972 if (const auto *rd = t->getAsRecordDecl())
973 return rd->hasVolatileMember();
974 return false;
975 }
976
977 void populateUnwindResumeBlock(bool isCleanup, cir::TryOp tryOp);
979 cir::TryOp tryOp);
980
981 /// The cleanup depth enclosing all the cleanups associated with the
982 /// parameters.
984
986 void populateCatchHandlersIfRequired(cir::TryOp tryOp);
987
988 /// Takes the old cleanup stack size and emits the cleanup blocks
989 /// that have been added.
990 void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth);
991 void popCleanupBlock();
992
993 /// Push a cleanup to be run at the end of the current full-expression. Safe
994 /// against the possibility that we're currently inside a
995 /// conditionally-evaluated expression.
996 template <class T, class... As>
997 void pushFullExprCleanup(CleanupKind kind, As... a) {
998 // If we're not in a conditional branch, or if none of the
999 // arguments requires saving, then use the unconditional cleanup.
1000 if (!isInConditionalBranch())
1001 return ehStack.pushCleanup<T>(kind, a...);
1002
1003 cgm.errorNYI("pushFullExprCleanup in conditional branch");
1004 }
1005
1006 /// Enters a new scope for capturing cleanups, all of which
1007 /// will be executed once the scope is exited.
1008 class RunCleanupsScope {
1009 EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
1010
1011 protected:
1014
1015 private:
1016 RunCleanupsScope(const RunCleanupsScope &) = delete;
1017 void operator=(const RunCleanupsScope &) = delete;
1018
1019 protected:
1021
1022 public:
1023 /// Enter a new cleanup scope.
1025 : performCleanup(true), cgf(cgf) {
1026 cleanupStackDepth = cgf.ehStack.stable_begin();
1027 oldDidCallStackSave = cgf.didCallStackSave;
1028 cgf.didCallStackSave = false;
1029 oldCleanupStackDepth = cgf.currentCleanupStackDepth;
1030 cgf.currentCleanupStackDepth = cleanupStackDepth;
1031 }
1032
1033 /// Exit this cleanup scope, emitting any accumulated cleanups.
1035 if (performCleanup)
1036 forceCleanup();
1037 }
1038
1039 /// Force the emission of cleanups now, instead of waiting
1040 /// until this object is destroyed.
1042 assert(performCleanup && "Already forced cleanup");
1043 {
1044 mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
1045 cgf.didCallStackSave = oldDidCallStackSave;
1046 cgf.popCleanupBlocks(cleanupStackDepth);
1047 performCleanup = false;
1048 cgf.currentCleanupStackDepth = oldCleanupStackDepth;
1049 }
1050 }
1051 };
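
  // Illustrative usage sketch (not part of the original header): wrapping a
  // region of emission so that any cleanups pushed onto ehStack inside it are
  // emitted when the scope ends, inside a CIRGenFunction member:
  //
  //   {
  //     RunCleanupsScope cleanups(*this);
  //     // ... emit statements that may push cleanups ...
  //   } // accumulated cleanups are emitted here (or via forceCleanup())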
1052
1053 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1055
1056public:
1057 /// Represents a scope, including function bodies, compound statements, and
1058 /// the substatements of if/while/do/for/switch/try statements. This class
1059 /// handles any automatic cleanup, along with the return value.
1060 struct LexicalScope : public RunCleanupsScope {
1061 private:
1062 // Block containing cleanup code for things initialized in this
1063 // lexical context (scope).
1064 mlir::Block *cleanupBlock = nullptr;
1065
1066 // Points to the scope entry block. This is useful, for instance, for
1067 // helping to insert allocas before finalizing any recursive CodeGen from
1068 // switches.
1069 mlir::Block *entryBlock;
1070
1071 LexicalScope *parentScope = nullptr;
1072
1073 // Holds the actual value for ScopeKind::Try
1074 cir::TryOp tryOp = nullptr;
1075
1076 // Only Regular is used at the moment. Support for other kinds will be
1077 // added as the relevant statements/expressions are upstreamed.
1078 enum Kind {
1079 Regular, // cir.if, cir.scope, if_regions
1080 Ternary, // cir.ternary
1081 Switch, // cir.switch
1082 Try, // cir.try
1083 GlobalInit // cir.global initialization code
1084 };
1085 Kind scopeKind = Kind::Regular;
1086
1087 // The scope return value.
1088 mlir::Value retVal = nullptr;
1089
1090 mlir::Location beginLoc;
1091 mlir::Location endLoc;
1092
1093 public:
1094 unsigned depth = 0;
1095
1096 LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
1097 : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
1098 beginLoc(loc), endLoc(loc) {
1099
1100 assert(entryBlock && "LexicalScope requires an entry block");
1101 cgf.curLexScope = this;
1102 if (parentScope)
1103 ++depth;
1104
1105 if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
1106 assert(fusedLoc.getLocations().size() == 2 && "too many locations");
1107 beginLoc = fusedLoc.getLocations()[0];
1108 endLoc = fusedLoc.getLocations()[1];
1109 }
1110 }
1111
1112 void setRetVal(mlir::Value v) { retVal = v; }
1113
1114 void cleanup();
1115 void restore() { cgf.curLexScope = parentScope; }
1116
1119 cleanup();
1120 restore();
1121 }
1122
1123 // ---
1124 // Kind
1125 // ---
1126 bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
1127 bool isRegular() { return scopeKind == Kind::Regular; }
1128 bool isSwitch() { return scopeKind == Kind::Switch; }
1129 bool isTernary() { return scopeKind == Kind::Ternary; }
1130 bool isTry() { return scopeKind == Kind::Try; }
1131 cir::TryOp getClosestTryParent();
1132 void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
1133 void setAsSwitch() { scopeKind = Kind::Switch; }
1134 void setAsTernary() { scopeKind = Kind::Ternary; }
1135 void setAsTry(cir::TryOp op) {
1136 scopeKind = Kind::Try;
1137 tryOp = op;
1138 }
1139
1140 // Lazily create the cleanup block, or return the existing one.
1141 mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
1142 if (cleanupBlock)
1143 return cleanupBlock;
1144 cleanupBlock = createCleanupBlock(builder);
1145 return cleanupBlock;
1146 }
1147
1148 cir::TryOp getTry() {
1149 assert(isTry());
1150 return tryOp;
1151 }
1152
1153 mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
1154 return cleanupBlock;
1155 }
1156
1157 mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
1158 // Create the cleanup block but don't hook it up just yet.
1159 mlir::OpBuilder::InsertionGuard guard(builder);
1160 mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
1161 : &cgf.curFn->getRegion(0);
1162 cleanupBlock = builder.createBlock(r);
1163 return cleanupBlock;
1164 }
1165
1166 // ---
1167 // Return handling.
1168 // ---
1169
1170 private:
1171 // On switches we need one return block per region, since cases don't
1172 // have their own scopes but are distinct regions nonetheless.
1173
1174 // TODO: This implementation should change once we have support for early
1175 // exits in MLIR structured control flow (llvm-project#161575)
1177 llvm::DenseMap<mlir::Block *, mlir::Location> retLocs;
1178 llvm::DenseMap<cir::CaseOp, unsigned> retBlockInCaseIndex;
1179 std::optional<unsigned> normalRetBlockIndex;
1180
1181 // There's usually only one ret block per scope, but this needs to be
1182 // get-or-create because of potentially unreachable return statements; note
1183 // that for those, all source locations map to the first block found.
1184 mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1185 assert((isa_and_nonnull<cir::CaseOp>(
1186 cgf.builder.getBlock()->getParentOp()) ||
1187 retBlocks.size() == 0) &&
1188 "only switches can hold more than one ret block");
1189
1190 // Create the return block but don't hook it up just yet.
1191 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
1192 auto *b = cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
1193 retBlocks.push_back(b);
1194 updateRetLoc(b, loc);
1195 return b;
1196 }
1197
1198 cir::ReturnOp emitReturn(mlir::Location loc);
1199 void emitImplicitReturn();
1200
1201 public:
1203 mlir::Location getRetLoc(mlir::Block *b) { return retLocs.at(b); }
1204 void updateRetLoc(mlir::Block *b, mlir::Location loc) {
1205 retLocs.insert_or_assign(b, loc);
1206 }
1207
1208 mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1209 // Check if we're inside a case region
1210 if (auto caseOp = mlir::dyn_cast_if_present<cir::CaseOp>(
1211 cgf.builder.getBlock()->getParentOp())) {
1212 auto iter = retBlockInCaseIndex.find(caseOp);
1213 if (iter != retBlockInCaseIndex.end()) {
1214 // Reuse existing return block
1215 mlir::Block *ret = retBlocks[iter->second];
1216 updateRetLoc(ret, loc);
1217 return ret;
1218 }
1219 // Create new return block
1220 mlir::Block *ret = createRetBlock(cgf, loc);
1221 retBlockInCaseIndex[caseOp] = retBlocks.size() - 1;
1222 return ret;
1223 }
1224
1225 if (normalRetBlockIndex) {
1226 mlir::Block *ret = retBlocks[*normalRetBlockIndex];
1227 updateRetLoc(ret, loc);
1228 return ret;
1229 }
1230
1231 mlir::Block *ret = createRetBlock(cgf, loc);
1232 normalRetBlockIndex = retBlocks.size() - 1;
1233 return ret;
1234 }
1235
1236 mlir::Block *getEntryBlock() { return entryBlock; }
1237 };
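
  // Illustrative usage sketch (not part of the original header): statement
  // emission typically opens a LexicalScope for each new syntactic scope so
  // cleanups and the per-scope return block are handled automatically.
  // Assumes a location `scopeLoc` inside a CIRGenFunction member:
  //
  //   {
  //     LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
  //     // ... emit the statements belonging to this scope ...
  //   } // cleanup() and restore() run in the destructor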
1238
1240
1241 typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
1242
1244
1245 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
1246 QualType type);
1247
1248 void pushDestroy(CleanupKind kind, Address addr, QualType type,
1249 Destroyer *destroyer);
1250
1252
1253 /// ----------------------
1254 /// CIR emit functions
1255 /// ----------------------
1256public:
1257 std::optional<mlir::Value>
1258 emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
1260 llvm::Triple::ArchType arch);
1261 std::optional<mlir::Value> emitAArch64SMEBuiltinExpr(unsigned builtinID,
1262 const CallExpr *expr);
1263 std::optional<mlir::Value> emitAArch64SVEBuiltinExpr(unsigned builtinID,
1264 const CallExpr *expr);
1265
1266 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
1267 SourceLocation loc,
1268 SourceLocation assumptionLoc,
1269 int64_t alignment,
1270 mlir::Value offsetValue = nullptr);
1271
1272 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
1273 SourceLocation assumptionLoc,
1274 int64_t alignment,
1275 mlir::Value offsetValue = nullptr);
1276
1277private:
1278 void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
1279 clang::CharUnits alignment);
1280
1281 CIRGenCallee emitDirectCallee(const GlobalDecl &gd);
1282
1283public:
1285 llvm::StringRef fieldName,
1286 unsigned fieldIndex);
1287
1288 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1289 mlir::Location loc, clang::CharUnits alignment,
1290 bool insertIntoFnEntryBlock,
1291 mlir::Value arraySize = nullptr);
1292 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1293 mlir::Location loc, clang::CharUnits alignment,
1294 mlir::OpBuilder::InsertPoint ip,
1295 mlir::Value arraySize = nullptr);
1296
1297 void emitAggregateStore(mlir::Value value, Address dest);
1298
1299 void emitAggExpr(const clang::Expr *e, AggValueSlot slot);
1300
1302
1303 /// Emit an aggregate copy.
1304 ///
1305 /// \param isVolatile \c true iff either the source or the destination is
1306 /// volatile.
1307 /// \param mayOverlap Whether the tail padding of the destination might be
1308 /// occupied by some other object. More efficient code can often be
1309 /// generated if not.
1310 void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
1311 AggValueSlot::Overlap_t mayOverlap,
1312 bool isVolatile = false);
1313
1314 /// Emit code to compute the specified expression which can have any type. The
1315 /// result is returned as an RValue struct. If this is an aggregate
1316 /// expression, the aggregate slot argument indicates where the result
1317 /// should be returned.
1320 bool ignoreResult = false);
1321
1322 /// Emits the code necessary to evaluate an arbitrary expression into the
1323 /// given memory location.
1324 void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals,
1325 bool isInitializer);
1326
1327 /// Similarly to emitAnyExpr(), however, the result will always be accessible
1328 /// even if no aggregate location is provided.
1330
1331 void emitAnyExprToExn(const Expr *e, Address addr);
1332
1333 void emitArrayDestroy(mlir::Value begin, mlir::Value numElements,
1334 QualType elementType, CharUnits elementAlign,
1335 Destroyer *destroyer);
1336
1337 mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
1338 QualType &baseType, Address &addr);
1340
1342
1344 LValueBaseInfo *baseInfo = nullptr);
1345
1346 mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
1347
1349 void emitAtomicInit(Expr *init, LValue dest);
1350 void emitAtomicStore(RValue rvalue, LValue dest, bool isInit);
1351 void emitAtomicStore(RValue rvalue, LValue dest, cir::MemOrder order,
1352 bool isVolatile, bool isInit);
1353
1355 mlir::OpBuilder::InsertPoint ip = {});
1356
1357 /// Emit code and set up symbol table for a variable declaration with auto,
1358 /// register, or no storage class specifier. These turn into simple stack
1359 /// objects, or globals, depending on the target.
1360 void emitAutoVarDecl(const clang::VarDecl &d);
1361
1362 void emitAutoVarCleanups(const AutoVarEmission &emission);
1363 /// Emit the initializer for an allocated variable. If this call is not
1364 /// associated with the call to emitAutoVarAlloca (as the address of the
1365 /// emission is not directly an alloca), the allocatedSeparately parameter can
1366 /// be used to suppress the assertions. However, this should only be used in
1367 /// extreme cases, as it doesn't properly reflect the language/AST.
1368 void emitAutoVarInit(const AutoVarEmission &emission);
1369 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1371
1372 void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
1373
1374 void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
1375 CXXCtorInitializer *baseInit);
1376
1378
1379 cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest);
1380
1381 mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
1382
1383 RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
1385
1386 /// Returns a Value corresponding to the size of the given expression by
1387 /// emitting a `cir.objsize` operation.
1388 ///
1389 /// \param e The expression whose object size to compute
1390 /// \param type Determines the semantics of the object size computation.
1391 /// The type parameter is a 2-bit value where:
1392 /// bit 0 (type & 1): 0 = whole object, 1 = closest subobject
1393 /// bit 1 (type & 2): 0 = maximum size, 2 = minimum size
1394 /// \param resType The result type for the size value
1395 /// \param emittedE Optional pre-emitted pointer value. If non-null, we'll
1396 /// call `cir.objsize` on this value rather than emitting e.
1397 /// \param isDynamic If true, allows runtime evaluation via dynamic mode
1398 mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type,
1399 cir::IntType resType, mlir::Value emittedE,
1400 bool isDynamic);
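
  // Illustrative sketch (not part of the original header) of decoding the
  // 2-bit `type` argument described above: __builtin_object_size(p, 3) asks
  // for the minimum size of the closest subobject, while type 0 asks for the
  // maximum size of the whole object.
  //
  //   bool wantsSubobject = (type & 1) != 0; // bit 0: whole object/subobject
  //   bool wantsMinimum = (type & 2) != 0;   // bit 1: maximum/minimum size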
1401
1402 mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e,
1403 unsigned type,
1404 cir::IntType resType,
1405 mlir::Value emittedE,
1406 bool isDynamic);
1407
1408 int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts);
1409
1411
1412 RValue emitCall(const CIRGenFunctionInfo &funcInfo,
1414 const CallArgList &args, cir::CIRCallOpInterface *callOp,
1415 mlir::Location loc);
1418 const CallArgList &args,
1419 cir::CIRCallOpInterface *callOrTryCall = nullptr) {
1420 assert(currSrcLoc && "source location must have been set");
1421 return emitCall(funcInfo, callee, returnValue, args, callOrTryCall,
1422 *currSrcLoc);
1423 }
1424
1425 RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
1427 void emitCallArg(CallArgList &args, const clang::Expr *e,
1428 clang::QualType argType);
1429 void emitCallArgs(
1430 CallArgList &args, PrototypeWrapper prototype,
1431 llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
1432 AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
1437
1438 template <typename T>
1439 mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
1440 mlir::ArrayAttr value,
1441 cir::CaseOpKind kind,
1442 bool buildingTopLevelCase);
1443
1444 mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
1445 mlir::Type condType,
1446 bool buildingTopLevelCase);
1447
1448 LValue emitCastLValue(const CastExpr *e);
1449
1450 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
1451 /// sanitizer is enabled, a runtime check is also emitted.
1452 mlir::Value emitCheckedArgForAssume(const Expr *e);
1453
1454 /// Emit a conversion from the specified complex type to the specified
1455 /// destination type, where the destination type is a CIR scalar type.
1456 mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
1457 QualType dstTy, SourceLocation loc);
1458
1461
1463
1464 mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
1465 cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1466 cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1467 cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
1468 cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
1469 mlir::Value coroframeAddr);
1471
1472 void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
1473
1475
1476 mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
1477
1479 AggValueSlot dest);
1480
1483 Address arrayBegin, const CXXConstructExpr *e,
1484 bool newPointerIsChecked,
1485 bool zeroInitialize = false);
1487 mlir::Value numElements, Address arrayBase,
1488 const CXXConstructExpr *e,
1489 bool newPointerIsChecked,
1490 bool zeroInitialize);
1492 clang::CXXCtorType type, bool forVirtualBase,
1493 bool delegating, AggValueSlot thisAVS,
1494 const clang::CXXConstructExpr *e);
1495
1497 clang::CXXCtorType type, bool forVirtualBase,
1498 bool delegating, Address thisAddr,
1500
1501 void emitCXXDeleteExpr(const CXXDeleteExpr *e);
1502
1504 bool forVirtualBase, bool delegating,
1505 Address thisAddr, QualType thisTy);
1506
1508 mlir::Value thisVal, QualType thisTy,
1509 mlir::Value implicitParam,
1510 QualType implicitParamTy, const CallExpr *e);
1511
1512 mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
1514
1517
1519 const Expr *e, Address base, mlir::Value memberPtr,
1520 const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo);
1521
1523 const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
1524 ReturnValueSlot returnValue, mlir::Value thisPtr,
1525 mlir::Value implicitParam, clang::QualType implicitParamTy,
1526 const clang::CallExpr *ce, CallArgList *rtlArgs);
1527
1529 const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
1530 ReturnValueSlot returnValue, bool hasQualifier,
1531 clang::NestedNameSpecifier qualifier, bool isArrow,
1532 const clang::Expr *base);
1533
1534 mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
1535
1536 void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
1537 mlir::Type elementTy, Address beginPtr,
1538 mlir::Value numElements,
1539 mlir::Value allocSizeWithoutCookie);
1540
1542 const CXXMethodDecl *md,
1544
1546
1548 const CallExpr *callExpr,
1550
1551 void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType,
1552 Address ptr);
1553
1554 void emitCXXThrowExpr(const CXXThrowExpr *e);
1555
1556 mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
1557
1558 mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s);
1559
1560 void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
1561 bool isFnTryBlock = false);
1562
1563 void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock = false);
1564
1566 clang::CXXCtorType ctorType, FunctionArgList &args);
1567
1568 // It's important not to confuse this and emitDelegateCXXConstructorCall.
1569 // Delegating constructors are the C++11 feature. The constructor delegate
1570 // optimization is used to reduce duplication in the base and complete
1571 // constructors where they are substantially the same.
1573 const FunctionArgList &args);
1574
1575 void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr,
1576 QualType deleteTy);
1577
1578 mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
1579
1580 mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
1581
1582 /// Emit an expression as an initializer for an object (variable, field, etc.)
1583 /// at the given location. The expression is not necessarily the normal
1584 /// initializer for the object, and the address is not necessarily
1585 /// its normal location.
1586 ///
1587 /// \param init the initializing expression
1588 /// \param d the object to act as if we're initializing
1589 /// \param lvalue the lvalue to initialize
1590 /// \param capturedByInit true if \p d is a __block variable whose address is
1591 /// potentially changed by the initializer
1592 void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
1593 LValue lvalue, bool capturedByInit = false);
1594
1595 mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
1596
1597 mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
1598
1599 mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s);
1600
1602
1604 clang::Expr *init);
1605
1607
1608 mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);
1609
1610 mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
1611
1612 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
1613
1614 void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty);
1615
1616 mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee,
1617 llvm::ArrayRef<mlir::Value> args = {});
1618
1619 /// Emit the computation of the specified expression of scalar type.
1620 mlir::Value emitScalarExpr(const clang::Expr *e,
1621 bool ignoreResultAssign = false);
1622
1623 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
1624 cir::UnaryOpKind kind, bool isPre);
1625
1626 /// Build a debug stoppoint if we are emitting debug info.
1627 void emitStopPoint(const Stmt *s);
1628
1629 // Build CIR for a statement. useCurrentScope should be true if no
1630 // new scopes need be created when finding a compound statement.
1631 mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope,
1632 llvm::ArrayRef<const Attr *> attrs = {});
1633
1634 mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
1635 bool useCurrentScope);
1636
1637 mlir::LogicalResult emitForStmt(const clang::ForStmt &s);
1638
1639 void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator,
1640 CallArgList &callArgs);
1641
1642 RValue emitCoawaitExpr(const CoawaitExpr &e,
1643 AggValueSlot aggSlot = AggValueSlot::ignored(),
1644 bool ignoreResult = false);
1645 /// Emit the computation of the specified expression of complex type,
1646 /// returning the result.
1647 mlir::Value emitComplexExpr(const Expr *e);
1648
1649 void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit);
1650
1651 mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv,
1652 cir::UnaryOpKind op, bool isPre);
1653
1657 mlir::Value &result);
1658
1659 mlir::LogicalResult
1660 emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
1661 AggValueSlot slot = AggValueSlot::ignored());
1662
1663 mlir::LogicalResult
1665 Address *lastValue = nullptr,
1666 AggValueSlot slot = AggValueSlot::ignored());
1667
1668 void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
1669 mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
1670 LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
1671
1672 mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
1673 mlir::Type condType,
1674 bool buildingTopLevelCase);
1675
1677 clang::CXXCtorType ctorType,
1678 const FunctionArgList &args,
1680
1681 /// We are performing a delegate call; that is, the current function is
1682 /// delegating to another one. Produce an r-value suitable for passing the
1683 /// given parameter.
1684 void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
1686
1687 /// Emit an `if` on a boolean condition to the specified blocks.
1688 /// FIXME: Based on the condition, this might try to simplify the codegen of
1689 /// the conditional based on the branch.
1690 /// In the future, we may apply code generation simplifications here,
1691 /// similar to those used in classic LLVM codegen
1692 /// See `EmitBranchOnBoolExpr` for inspiration.
1693 mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
1694 const clang::Stmt *thenS,
1695 const clang::Stmt *elseS);
1696 cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
1697 BuilderCallbackRef thenBuilder,
1698 mlir::Location thenLoc,
1699 BuilderCallbackRef elseBuilder,
1700 std::optional<mlir::Location> elseLoc = {});
1701
1702 mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
1703
1704 LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e);
1705
1706 mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
1707 mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
1708
1709 void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md);
1710 void emitLambdaStaticInvokeBody(const CXXMethodDecl *md);
1711
1712 void populateCatchHandlers(cir::TryOp tryOp);
1713
1714 mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
1715
1716 /// Emit code to compute the specified expression,
1717 /// ignoring the result.
1718 void emitIgnoredExpr(const clang::Expr *e);
1719
1720 RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc);
1721
1722 /// Load a complex number from the specified l-value.
1723 mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc);
1724
1725 RValue emitLoadOfExtVectorElementLValue(LValue lv);
1726
1727 /// Given an expression that represents a value lvalue, this method emits
1728 /// the address of the lvalue, then loads the result as an rvalue,
1729 /// returning the rvalue.
1730 RValue emitLoadOfLValue(LValue lv, SourceLocation loc);
1731
1732 Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
1733 LValueBaseInfo *pointeeBaseInfo);
1734 LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
1735 QualType refTy, AlignmentSource source);
1736
1737 /// EmitLoadOfScalar - Load a scalar value from an address, taking
1738 /// care to appropriately convert from the memory representation to
1739 /// the LLVM value representation. The l-value must be a simple
1740 /// l-value.
1741 mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);
1742 mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, QualType ty,
1743 SourceLocation loc, LValueBaseInfo baseInfo);
1744
1745 /// Emit code to compute a designator that specifies the location
1746 /// of the expression.
1747 /// FIXME: document this function better.
1748 LValue emitLValue(const clang::Expr *e);
1749 LValue emitLValueForBitField(LValue base, const FieldDecl *field);
1750 LValue emitLValueForField(LValue base, const clang::FieldDecl *field);
1751
1752 LValue emitLValueForLambdaField(const FieldDecl *field);
1753 LValue emitLValueForLambdaField(const FieldDecl *field,
1754 mlir::Value thisValue);
1755
1756 /// Like emitLValueForField, except that if the Field is a reference, this
1757 /// will return the address of the reference and not the address of the value
1758 /// stored in the reference.
1759 LValue emitLValueForFieldInitialization(LValue base,
1760 const clang::FieldDecl *field,
1761 llvm::StringRef fieldName);
1762
1763 LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
1764
1765 LValue emitMemberExpr(const MemberExpr *e);
1766
1767 LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
1768
1769 LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
1770
1771 /// Given an expression with a pointer type, emit the value and compute our
1772 /// best estimate of the alignment of the pointee.
1773 ///
1774 /// One reasonable way to use this information is when there's a language
1775 /// guarantee that the pointer must be aligned to some stricter value, and
1776 /// we're simply trying to ensure that sufficiently obvious uses of under-
1777 /// aligned objects don't get miscompiled; for example, a placement new
1778 /// into the address of a local variable. In such a case, it's quite
1779 /// reasonable to just ignore the returned alignment when it isn't from an
1780 /// explicit source.
1781 Address emitPointerWithAlignment(const clang::Expr *expr,
1782 LValueBaseInfo *baseInfo = nullptr);
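 // Illustrative sketch (editorial addition, not from the original header) of
 // the "placement new into a local" case mentioned above; 'buf' may be
 // under-aligned for 'int', yet such obvious uses should not be miscompiled
 // (requires <new>):
 //
 //   char buf[sizeof(int)];
 //   new (buf) int(0);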
1783
1784 /// Emits a reference binding to the passed in expression.
1785 RValue emitReferenceBindingToExpr(const Expr *e);
1786
1787 mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);
1788
1789 RValue emitRotate(const CallExpr *e, bool isRotateLeft);
1790
1791 mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);
1792
1793 /// Emit a conversion from the specified type to the specified destination
1794 /// type, both of which are CIR scalar types.
1795 mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
1796 clang::QualType dstType,
1797 clang::SourceLocation loc);
1798
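 // Illustrative usage sketch of emitScalarConversion above (editorial
 // addition; 'iv', 'intTy', 'doubleTy', and 'loc' are hypothetical
 // placeholders), e.g. when lowering `double d = i;` for an `int i`:
 //
 //   mlir::Value dv = emitScalarConversion(iv, intTy, doubleTy, loc);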
1799 void emitScalarInit(const clang::Expr *init, mlir::Location loc,
1800 LValue lvalue, bool capturedByInit = false);
1801
1802 mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx,
1803 const Expr *argExpr);
1804
1805 void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage);
1806
1807 void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest,
1808 bool isInit);
1809
1810 void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
1811 clang::QualType ty, LValueBaseInfo baseInfo,
1812 bool isInit = false, bool isNontemporal = false);
1813 void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
1814
1815 /// Store the specified rvalue into the specified
1816 /// lvalue, where both are guaranteed to have the same type, and that type
1817 /// is 'Ty'.
1818 void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);
1819
1820 mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);
1821
1822 LValue emitStringLiteralLValue(const StringLiteral *e,
1823 llvm::StringRef name = ".str");
1824
1825 mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
1826 mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
1827 bool buildingTopLevelCase);
1828 mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
1829
1830 std::optional<mlir::Value>
1831 emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e,
1832 ReturnValueSlot &returnValue);
1833
1834 /// Given a value and its clang type, returns the value casted to its memory
1835 /// representation.
1836 /// Note: CIR defers most of the special casting to the final lowering passes
1837 /// to preserve the high-level information.
1838 mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);
1839
1840 /// Emit a trap instruction, which is used to abort the program in an abnormal
1841 /// way, usually for debugging purposes.
1842 /// \p createNewBlock indicates whether to create a new block for the IR
1843 /// builder. Since the `cir.trap` operation is a terminator, operations that
1844 /// follow a trap cannot be emitted after `cir.trap` in the same block. To
1845 /// ensure these operations get emitted successfully, you need to create a new
1846 /// dummy block and set the insertion point there before continuing from the
1847 /// trap operation.
1848 void emitTrap(mlir::Location loc, bool createNewBlock);
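 // Illustrative caller-side sketch (editorial addition; 'cgf' and 'loc' are
 // hypothetical): when more IR will be emitted after the trap, request a
 // fresh block so the terminator is not followed by dead operations:
 //
 //   cgf.emitTrap(loc, /*createNewBlock=*/true);
 //   // ... later emission lands in the dummy block described above ...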
1849
1850 LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
1851
1852 mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
1853
1854 /// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
1855 /// checking is enabled. Otherwise, just emit an unreachable instruction.
1856 /// \p createNewBlock indicates whether to create a new block for the IR
1857 /// builder. Since the `cir.unreachable` operation is a terminator, operations
1858 /// that follow an unreachable point cannot be emitted after `cir.unreachable`
1859 /// in the same block. To ensure these operations get emitted successfully,
1860 /// you need to create a dummy block and set the insertion point there before
1861 /// continuing from the unreachable point.
1862 void emitUnreachable(clang::SourceLocation loc, bool createNewBlock);
1863
1864 /// This method handles emission of any variable declaration
1865 /// inside a function, including static vars etc.
1866 void emitVarDecl(const clang::VarDecl &d);
1867
1868 void emitVariablyModifiedType(QualType ty);
1869
1870 mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
1871
1872 std::optional<mlir::Value> emitX86BuiltinExpr(unsigned builtinID,
1873 const CallExpr *expr);
1874
1875 /// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
1876 /// nonnull, if \p lhs is marked _Nonnull.
1877 void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
1879
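 // Illustrative sketch (editorial addition, not from the original header) of
 // an assignment emitNullabilityCheck guards, e.g. under Clang's nullability
 // sanitizer (-fsanitize=nullability-assign):
 //
 //   int *_Nonnull p;
 //   p = q;   // a runtime test that 'q' is nonnull is emitted before the store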
1880 /// An object to manage conditionally-evaluated expressions.
1881 class ConditionalEvaluation {
1882 CIRGenFunction &cgf;
1883 mlir::OpBuilder::InsertPoint insertPt;
1884
1885 public:
1886 ConditionalEvaluation(CIRGenFunction &cgf)
1887 : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {}
1888 ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
1889 : cgf(cgf), insertPt(ip) {}
1890
1891 void begin() {
1892 assert(cgf.outermostConditional != this);
1893 if (!cgf.outermostConditional)
1894 cgf.outermostConditional = this;
1895 }
1896
1897 void end() {
1898 assert(cgf.outermostConditional != nullptr);
1899 if (cgf.outermostConditional == this)
1900 cgf.outermostConditional = nullptr;
1901 }
1902
1903 /// Returns the insertion point which will be executed prior to each
1904 /// evaluation of the conditional code. In LLVM OG, this method
1905 /// is called getStartingBlock.
1906 mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; }
1907 };
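 // Illustrative usage sketch (editorial addition; the begin()/end() spelling
 // follows the classic CodeGen counterpart and is an assumption here):
 //
 //   ConditionalEvaluation eval(*this);
 //   eval.begin();
 //   // ... emit one conditionally-executed arm of the expression ...
 //   eval.end();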
1908
1909 struct ConditionalInfo {
1910 std::optional<LValue> lhs{}, rhs{};
1911 mlir::Value result{};
1912 };
1913
1914 // Return true if we're currently emitting one branch or the other of a
1915 // conditional expression.
1916 bool isInConditionalBranch() const { return outermostConditional != nullptr; }
1917
1918 void setBeforeOutermostConditional(mlir::Value value, Address addr) {
1919 assert(isInConditionalBranch());
1920 {
1921 mlir::OpBuilder::InsertionGuard guard(builder);
1922 builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
1923 builder.createStore(
1924 value.getLoc(), value, addr, /*isVolatile=*/false,
1925 mlir::IntegerAttr::get(
1926 mlir::IntegerType::get(value.getContext(), 64),
1927 (uint64_t)addr.getAlignment().getAsAlign().value()));
1928 }
1929 }
1930
1931 // Points to the outermost active conditional control. This is used so that
1932 // we know if a temporary should be destroyed conditionally.
1933 ConditionalEvaluation *outermostConditional = nullptr;
1934
1935 /// An RAII object to record that we're evaluating a statement
1936 /// expression.
1937 class StmtExprEvaluation {
1938 CIRGenFunction &cgf;
1939
1940 /// We have to save the outermost conditional: cleanups in a
1941 /// statement expression aren't conditional just because the
1942 /// StmtExpr is.
1943 ConditionalEvaluation *savedOutermostConditional;
1944
1945 public:
1946 StmtExprEvaluation(CIRGenFunction &cgf)
1947 : cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
1948 cgf.outermostConditional = nullptr;
1949 }
1950
1951 ~StmtExprEvaluation() {
1952 cgf.outermostConditional = savedOutermostConditional;
1953 }
1954 };
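 // Illustrative sketch (editorial addition; 'compute' is a hypothetical
 // function) of the construct this RAII object is active for, a GNU
 // statement expression:
 //
 //   int y = ({ int t = compute(); t + 1; });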
1955
1956 template <typename FuncTy>
1957 ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
1958 const FuncTy &branchGenFunc);
1959
1960 mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
1961 const clang::Stmt *thenS,
1962 const clang::Stmt *elseS);
1963
1964 /// Build a "reference" to a va_list; this is either the address or the value
1965 /// of the expression, depending on how va_list is defined.
1966 Address emitVAListRef(const Expr *e);
1967
1968 /// Emits the start of a CIR variable-argument operation (`cir.va_start`)
1969 ///
1970 /// \param vaList A reference to the \c va_list as emitted by either
1971 /// \c emitVAListRef or \c emitMSVAListRef.
1972 ///
1973 /// \param count The number of arguments in \c vaList
1974 void emitVAStart(mlir::Value vaList, mlir::Value count);
1975
1976 /// Emits the end of a CIR variable-argument operation (`cir.va_end`)
1977 ///
1978 /// \param vaList A reference to the \c va_list as emitted by either
1979 /// \c emitVAListRef or \c emitMSVAListRef.
1980 void emitVAEnd(mlir::Value vaList);
1981
1982 /// Generate code to get an argument from the passed in pointer
1983 /// and update it accordingly.
1984 ///
1985 /// \param ve The \c VAArgExpr for which to generate code.
1986 ///
1987 /// \param vaListAddr Receives a reference to the \c va_list as emitted by
1988 /// either \c emitVAListRef or \c emitMSVAListRef.
1989 ///
1990 /// \returns SSA value with the argument.
1991 mlir::Value emitVAArg(VAArgExpr *ve);
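 // Illustrative reference (editorial addition, not from the original header):
 // the C pattern these va_list helpers lower, using <stdarg.h>:
 //
 //   void f(int last, ...) {
 //     va_list ap;
 //     va_start(ap, last);        // handled by emitVAStart
 //     int v = va_arg(ap, int);   // handled by emitVAArg
 //     va_end(ap);                // handled by emitVAEnd
 //   }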
1992
1993 /// ----------------------
1994 /// CIR build helpers
1995 /// -----------------
1996public:
1997 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
1998 const Twine &name = "tmp",
1999 mlir::Value arraySize = nullptr,
2000 bool insertIntoFnEntryBlock = false);
2001 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2002 const Twine &name = "tmp",
2003 mlir::OpBuilder::InsertPoint ip = {},
2004 mlir::Value arraySize = nullptr);
2005 Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
2006 const Twine &name = "tmp",
2007 mlir::Value arraySize = nullptr,
2008 Address *alloca = nullptr,
2009 mlir::OpBuilder::InsertPoint ip = {});
2010 Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
2011 mlir::Location loc,
2012 const Twine &name = "tmp",
2013 mlir::Value arraySize = nullptr,
2014 mlir::OpBuilder::InsertPoint ip = {});
2015
2016 /// Create a temporary memory object of the given type, with
2017 /// appropriate alignment and cast it to the default address space. Returns
2018 /// the original alloca instruction by \p Alloca if it is not nullptr.
2019 Address createMemTemp(QualType t, mlir::Location loc,
2020 const Twine &name = "tmp", Address *alloca = nullptr,
2021 mlir::OpBuilder::InsertPoint ip = {});
2022 Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
2023 const Twine &name = "tmp", Address *alloca = nullptr,
2024 mlir::OpBuilder::InsertPoint ip = {});
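 // Illustrative usage sketch of createMemTemp (editorial addition; 'e' and
 // 'loc' are hypothetical placeholders):
 //
 //   Address slot = createMemTemp(e->getType(), loc, "ref.tmp");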
2025
2026 //===--------------------------------------------------------------------===//
2027 // OpenMP Emission
2028 //===--------------------------------------------------------------------===//
2029public:
2030 mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s);
2031 mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s);
2032 mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s);
2033 mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s);
2034 mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s);
2035 mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s);
2036 mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s);
2037 mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s);
2038 mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s);
2039 mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s);
2040 mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s);
2041 mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s);
2042 mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s);
2043 mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s);
2044 mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s);
2045 mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s);
2046 mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s);
2047 mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s);
2048 mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s);
2049 mlir::LogicalResult
2050 emitOMPParallelForDirective(const OMPParallelForDirective &s);
2051 mlir::LogicalResult
2052 emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s);
2053 mlir::LogicalResult
2054 emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s);
2055 mlir::LogicalResult
2056 emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s);
2057 mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s);
2058 mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s);
2059 mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s);
2060 mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s);
2061 mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s);
2062 mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s);
2063 mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s);
2064 mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s);
2065 mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s);
2066 mlir::LogicalResult
2067 emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s);
2068 mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s);
2069 mlir::LogicalResult
2070 emitOMPTargetDataDirective(const OMPTargetDataDirective &s);
2071 mlir::LogicalResult
2072 emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s);
2073 mlir::LogicalResult
2074 emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s);
2075 mlir::LogicalResult
2076 emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s);
2077 mlir::LogicalResult
2078 emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s);
2079 mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s);
2080 mlir::LogicalResult
2081 emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s);
2082 mlir::LogicalResult
2083 emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s);
2084 mlir::LogicalResult
2085 emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s);
2086 mlir::LogicalResult
2087 emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s);
2088 mlir::LogicalResult
2089 emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s);
2090 mlir::LogicalResult
2091 emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s);
2092 mlir::LogicalResult
2093 emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s);
2094 mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(
2095 const OMPParallelMaskedTaskLoopDirective &s);
2096 mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(
2097 const OMPParallelMaskedTaskLoopSimdDirective &s);
2098 mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(
2099 const OMPParallelMasterTaskLoopDirective &s);
2100 mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(
2101 const OMPParallelMasterTaskLoopSimdDirective &s);
2102 mlir::LogicalResult
2103 emitOMPDistributeDirective(const OMPDistributeDirective &s);
2104 mlir::LogicalResult emitOMPDistributeParallelForDirective(
2105 const OMPDistributeParallelForDirective &s);
2106 mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(
2107 const OMPDistributeParallelForSimdDirective &s);
2108 mlir::LogicalResult
2109 emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s);
2110 mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(
2111 const OMPTargetParallelGenericLoopDirective &s);
2112 mlir::LogicalResult emitOMPTargetParallelForSimdDirective(
2113 const OMPTargetParallelForSimdDirective &s);
2114 mlir::LogicalResult
2115 emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s);
2116 mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(
2117 const OMPTargetTeamsGenericLoopDirective &s);
2118 mlir::LogicalResult
2119 emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s);
2120 mlir::LogicalResult
2121 emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s);
2122 mlir::LogicalResult
2123 emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s);
2124 mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(
2125 const OMPTeamsDistributeParallelForSimdDirective &s);
2126 mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(
2127 const OMPTeamsDistributeParallelForDirective &s);
2128 mlir::LogicalResult
2129 emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s);
2130 mlir::LogicalResult
2131 emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s);
2132 mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(
2133 const OMPTargetTeamsDistributeDirective &s);
2134 mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(
2135 const OMPTargetTeamsDistributeParallelForDirective &s);
2136 mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(
2137 const OMPTargetTeamsDistributeParallelForSimdDirective &s);
2138 mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(
2139 const OMPTargetTeamsDistributeSimdDirective &s);
2140 mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s);
2141 mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s);
2142 mlir::LogicalResult
2143 emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s);
2144 mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s);
2145 mlir::LogicalResult
2146 emitOMPInterchangeDirective(const OMPInterchangeDirective &s);
2147 mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s);
2148 mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s);
2149 mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s);
2150
2151 void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d);
2152 void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d);
2153 void emitOMPCapturedExpr(const OMPCapturedExprDecl &d);
2154 void emitOMPAllocateDecl(const OMPAllocateDecl &d);
2155 void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d);
2156 void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d);
2157 void emitOMPRequiresDecl(const OMPRequiresDecl &d);
2158
2159private:
2160 template <typename Op>
2161 void emitOpenMPClauses(Op &op, ArrayRef<const OMPClause *> clauses);
2162
2163 //===--------------------------------------------------------------------===//
2164 // OpenACC Emission
2165 //===--------------------------------------------------------------------===//
2166private:
2167 template <typename Op>
2168 Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind,
2169 llvm::ArrayRef<const OpenACCClause *> clauses);
2170 // Function to do the basic implementation of an operation with an Associated
2171 // Statement. Models AssociatedStmtConstruct.
2172 template <typename Op, typename TermOp>
2173 mlir::LogicalResult
2174 emitOpenACCOpAssociatedStmt(mlir::Location start, mlir::Location end,
2175 OpenACCDirectiveKind dirKind,
2176 llvm::ArrayRef<const OpenACCClause *> clauses,
2177 const Stmt *associatedStmt);
2178
2179 template <typename Op, typename TermOp>
2180 mlir::LogicalResult emitOpenACCOpCombinedConstruct(
2181 mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
2182 llvm::ArrayRef<const OpenACCClause *> clauses, const Stmt *loopStmt);
2183
2184 template <typename Op>
2185 void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind,
2186 ArrayRef<const OpenACCClause *> clauses);
2187 // The second template argument doesn't strictly need to be a template
2188 // parameter, since it is always an mlir::acc::LoopOp, but because this
2189 // function is a template anyway, making it a template argument lets us avoid
2190 // including the OpenACC MLIR headers here. We rely on linker failures and
2191 // explicit instantiation to ensure we don't mess this up; it is only called
2192 // from one place and instantiated three times.
2193 template <typename ComputeOp, typename LoopOp>
2194 void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp,
2195 OpenACCDirectiveKind dirKind,
2196 ArrayRef<const OpenACCClause *> clauses);
2197
2198 // The OpenACC LoopOp requires that we have auto, seq, or independent on all
2199 // LoopOp operations for the 'none' device type case. This function checks if
2200 // the LoopOp already has one and, if not, updates it to have one.
2201 void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan,
2203
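 // Illustrative sketch (editorial addition, not from the original header) of a
 // loop that carries no explicit parallelism marker and that the
 // updateLoopOpParallelism hook above would have to annotate for the 'none'
 // device type case:
 //
 //   #pragma acc parallel loop
 //   for (int i = 0; i < n; ++i)
 //     a[i] = b[i];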
2204 // The OpenACC 'cache' construct actually applies to the 'loop' if present. So
2205 // keep track of the 'loop' so that we can add the cache vars to it correctly.
2206 mlir::acc::LoopOp *activeLoopOp = nullptr;
2207
2208 struct ActiveOpenACCLoopRAII {
2209 CIRGenFunction &cgf;
2210 mlir::acc::LoopOp *oldLoopOp;
2211
2212 ActiveOpenACCLoopRAII(CIRGenFunction &cgf, mlir::acc::LoopOp *newOp)
2213 : cgf(cgf), oldLoopOp(cgf.activeLoopOp) {
2214 cgf.activeLoopOp = newOp;
2215 }
2216 ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; }
2217 };
2218
2219 // Keep track of the last place we inserted a 'recipe' so that we can insert
2220 // the next one in lexical order.
2221 mlir::OpBuilder::InsertPoint lastRecipeLocation;
2222
2223public:
2224 // Helper type used to store the list of important information for a 'data'
2225 // clause variable, or a 'cache' variable reference.
2226 struct OpenACCDataOperandInfo {
2227 mlir::Location beginLoc;
2228 mlir::Value varValue;
2229 std::string name;
2230 // The type of the original variable reference: that is, after 'bounds' have
2231 // removed pointers/array types/etc. So in the case of int arr[5], and a
2232 // private(arr[1]), 'origType' is 'int', but 'baseType' is 'int[5]'.
2236 // The list of types that we found when going through the bounds, which we
2237 // can use to properly set the alloca section.
2239 };
2240
2241 // Gets the collection of info required to lower an OpenACC clause or cache
2242 // construct variable reference.
2243 OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e);
2244 // Helper function to emit the integer expressions as required by an OpenACC
2245 // clause/construct.
2246 mlir::Value emitOpenACCIntExpr(const Expr *intExpr);
2247 // Helper function to emit an integer constant as an mlir int type, used for
2248 // constants in OpenACC constructs/clauses.
2249 mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width,
2250 int64_t value);
2251
2252 mlir::LogicalResult
2253 emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s);
2254 mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
2255 mlir::LogicalResult
2256 emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s);
2257 mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
2258 mlir::LogicalResult
2259 emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s);
2260 mlir::LogicalResult
2261 emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s);
2262 mlir::LogicalResult
2263 emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s);
2264 mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
2265 mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
2266 mlir::LogicalResult
2267 emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s);
2268 mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
2269 mlir::LogicalResult
2270 emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s);
2271 mlir::LogicalResult
2272 emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s);
2273 mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);
2274
2275 void emitOpenACCDeclare(const OpenACCDeclareDecl &d);
2276 void emitOpenACCRoutine(const OpenACCRoutineDecl &d);
2277
2278 /// Create a temporary memory object for the given aggregate type.
2279 AggValueSlot createAggTemp(QualType ty, mlir::Location loc,
2280 const Twine &name = "tmp",
2281 Address *alloca = nullptr) {
2283 return AggValueSlot::forAddr(
2284 createMemTemp(ty, loc, name, alloca), ty.getQualifiers(),
2285 AggValueSlot::IsNotDestructed, AggValueSlot::IsNotAliased,
2286 AggValueSlot::DoesNotOverlap);
2287 }
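 // Illustrative usage sketch of createAggTemp (editorial addition; 'e' and
 // 'loc' are hypothetical placeholders):
 //
 //   AggValueSlot slot = createAggTemp(e->getType(), loc, "agg.tmp");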
2288
2289private:
2290 QualType getVarArgType(const Expr *arg);
2291};
2292
2293} // namespace clang::CIRGen
2294
2295#endif
Defines the clang::ASTContext interface.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
Defines the clang::Expr interface and subclasses for C++ expressions.
Defines an enumeration for C++ overloaded operators.
C Language Family Type Representation.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
Represents a member of a struct/union/class.
Definition Decl.h:3160
This class represents a 'loop' construct. The 'loop' construct applies to a 'for' loop (or range-for ...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4287
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3723
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3267
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition Expr.h:6814
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition Expr.h:4387
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition Expr.h:4425
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition Expr.h:4422
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
BreakStmt - This represents a break.
Definition Stmt.h:3125
mlir::Value getPointer() const
Definition Address.h:90
static Address invalid()
Definition Address.h:69
clang::CharUnits getAlignment() const
Definition Address.h:130
mlir::Value getBasePointer() const
Definition Address.h:95
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
AbstractCallee(const clang::FunctionDecl *fd)
const clang::ParmVarDecl * getParamDecl(unsigned I) const
CXXDefaultInitExprScope(CIRGenFunction &cgf, const CXXDefaultInitExpr *e)
An object to manage conditionally-evaluated expressions.
ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
mlir::OpBuilder::InsertPoint getInsertPoint() const
Returns the insertion point which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forReference(mlir::TypedAttr c)
static ConstantEmission forValue(mlir::TypedAttr c)
LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const
DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr)
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, LValue lvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
RunCleanupsScope(CIRGenFunction &cgf)
Enter a new cleanup scope.
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void restore()
Can be used to restore the state early, before the dtor is run.
SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value)
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
static bool hasScalarEvaluationKind(clang::QualType type)
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
void emitOpenACCRoutine(const OpenACCRoutineDecl &d)
void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d, cir::GlobalOp gv, cir::GetGlobalOp gvAddr)
Add the initializer for 'd' to the global variable that has already been created for it.
mlir::LogicalResult emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard SourceLocExprScopeGuard
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d, mlir::OpBuilder::InsertPoint ip={})
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, clang::QualType::DestructionKind dtorKind)
Enter a destroy cleanup for the given local variable.
ImplicitParamDecl * cxxabiThisDecl
CXXThisDecl - When generating code for a C++ member function, this will hold the implicit 'this' decl...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
mlir::LogicalResult emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
mlir::LogicalResult emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &s)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
llvm::ScopedHashTable< const clang::Decl *, mlir::Value > SymTableTy
The symbol table maps a variable name to a value in the current scope.
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s)
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator, CallArgList &callArgs)
mlir::Block * getCurFunctionEntryBlock()
void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp, bool isFnTryBlock=false)
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
void emitOMPRequiresDecl(const OMPRequiresDecl &d)
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::LogicalResult emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s)
void populateEHCatchRegions(EHScopeStack::stable_iterator scope, cir::TryOp tryOp)
Address cxxDefaultInitExprThis
The value of 'this' to sue when evaluating CXXDefaultInitExprs within this expression.
void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &s)
void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock=false)
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s)
void setBeforeOutermostConditional(mlir::Value value, Address addr)
mlir::LogicalResult emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s)
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::Value loadCXXThis()
Load the value for 'this'.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-vlaue withi the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
llvm::DenseMap< const clang::Decl *, Address > DeclMapTy
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
const clang::Decl * curFuncDecl
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
LValue emitLValueForLambdaField(const FieldDecl *field)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOrTryCall=nullptr)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
bool isTrivialInitializer(const Expr *init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
void emitOpenACCDeclare(const OpenACCDeclareDecl &d)
Address getAddrOfLocalVar(const clang::VarDecl *vd)
Return the address of a local variable.
void emitAnyExprToExn(const Expr *e, Address addr)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
Address getAsNaturalAddressOf(Address addr, QualType pointeeTy)
JumpDest returnBlock(mlir::Block *retBlock)
Unified return block.
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s)
mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase, bool delegating)
Return the VTT parameter that should be passed to a base constructor/destructor with virtual bases.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s)
void initializeVTablePointers(mlir::Location loc, const clang::CXXRecordDecl *rd)
mlir::Type convertType(const TypeDecl *t)
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
void initializeVTablePointer(mlir::Location loc, const VPtr &vptr)
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d)
void emitAggregateStore(mlir::Value value, Address dest)
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
ConditionalEvaluation * outermostConditional
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &s)
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl, CXXCtorInitializer *baseInit)
RValue emitAtomicExpr(AtomicExpr *e)
void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d, LValue lvalue, bool capturedByInit=false)
Emit an expression as an initializer for an object (variable, field, etc.) at the given location.
mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s)
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s)
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
void emitVAStart(mlir::Value vaList, mlir::Value count)
Emits the start of a CIR variable-argument operation (cir.va_start)
mlir::LogicalResult emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s)
VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass)
const TargetCIRGenInfo & getTargetHooks() const
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s)
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s)
JumpDest getJumpDestInCurrentScope(mlir::Block *target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e)
mlir::LogicalResult emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s)
CleanupKind getCleanupKind(QualType::DestructionKind kind)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s)
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
Definition CIRGenAsm.cpp:86
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::LogicalResult emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s)
void emitOMPAllocateDecl(const OMPAllocateDecl &d)
mlir::LogicalResult emitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &s)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType)
ImplicitParamDecl * cxxStructorImplicitParamDecl
When generating code for a constructor or destructor, this will hold the implicit argument (e....
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d)
mlir::LogicalResult emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType)
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
void populateCatchHandlersIfRequired(cir::TryOp tryOp)
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s)
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s)
mlir::LogicalResult emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s)
mlir::LogicalResult emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
static int64_t getZExtIntValueFromConstOp(mlir::Value val)
Get zero-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s)
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &s)
mlir::LogicalResult emitOMPInterchangeDirective(const OMPInterchangeDirective &s)
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s)
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
void populateUnwindResumeBlock(bool isCleanup, cir::TryOp tryOp)
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s)
LValue emitAggExprToLValue(const Expr *e)
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
clang::CurrentSourceLocExprScope curSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
mlir::Value loadCXXVTT()
Load the VTT parameter to base constructors/destructors have virtual bases.
void emitVarDecl(const clang::VarDecl &d)
This method handles emission of any variable declaration inside a function, including static vars etc...
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo)
static int64_t getSExtIntValueFromConstOp(mlir::Value val)
Get integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPTargetDataDirective(const OMPTargetDataDirective &s)
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &s)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr, const clang::CXXRecordDecl *vtableClass)
Return the Value of the vtable pointer member pointed to by thisAddr.
void emitArrayDestroy(mlir::Value begin, mlir::Value numElements, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Destroys all the elements of the given array, beginning from last to first.
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::LogicalResult emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s)
RValue emitAnyExprToTemp(const clang::Expr *e)
Similarly to emitAnyExpr(), however, the result will always be accessible even if no aggregate locati...
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s)
mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS)
void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
llvm::SmallPtrSet< const clang::CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
bool hasVolatileMember(QualType t)
returns true if aggregate type has a volatile member.
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s)
void emitCallArg(CallArgList &args, const clang::Expr *e, clang::QualType argType)
clang::FieldDecl * lambdaThisCaptureField
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s)
void emitConstructorBody(FunctionArgList &args)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, excpet that if the Field is a reference, this will return the address of the...
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &s)
LValue emitCallExprLValue(const clang::CallExpr *e)
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s)
bool haveInsertPoint() const
True if an insertion point is defined.
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::LogicalResult emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s)
void emitAutoVarInit(const AutoVarEmission &emission)
Emit the initializer for an allocated variable.
void emitInitializerForField(clang::FieldDecl *field, LValue lhs, clang::Expr *init)
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::LogicalResult emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s)
void maybeEmitDeferredVarDeclInit(const VarDecl *vd)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_start)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
std::optional< mlir::Value > emitAArch64SMEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
void pushStackRestore(CleanupKind kind, Address spMem)
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s)
void emitAutoVarDecl(const clang::VarDecl &d)
Emit code and set up symbol table for a variable declaration with auto, register, or no storage class...
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s)
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest)
Build a unconditional branch to the lexical scope cleanup block or with the labeled blocked if alread...
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s)
mlir::LogicalResult emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s)
CIRGenBuilderTy & getBuilder()
bool didCallStackSave
Whether a cir.stacksave operation has been added.
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
mlir::Value emitOpenACCIntExpr(const Expr *intExpr)
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitDestroy(Address addr, QualType type, Destroyer *destroyer)
Immediately perform the destruction of the given object.
const CIRGenModule & getCIRGenModule() const
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
llvm::DenseMap< const VarDecl *, mlir::Value > nrvoFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
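As a hedged illustration of why a runtime flag is needed, NRVO may apply on only some control-flow paths through a function (hypothetical example, not from this header):

  struct Widget {
    Widget() {}
    ~Widget() {}
  };
  Widget make(bool early) {
    Widget w;          // NRVO candidate; a flag records whether NRVO was applied
    if (early)
      return Widget(); // a different object is returned, so 'w' still needs destruction
    return w;          // on this path 'w' may be constructed directly in the return slot
  }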
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
Destroyer * getDestroyer(clang::QualType::DestructionKind kind)
void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty)
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
void emitAtomicInit(Expr *init, LValue dest)
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
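A minimal, hypothetical example of that exception, where a cast to a reference type yields an lvalue:

  struct Base { int x; };
  struct Derived : Base {};
  void setBase(Derived &d) {
    static_cast<Base &>(d).x = 1; // the cast result is an lvalue and can be assigned to
  }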
mlir::LogicalResult emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
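This matters when a constant-false branch would otherwise be dropped; a hypothetical example where the block must be kept because it contains a label:

  extern "C" int puts(const char *);
  void h() {
    goto target;  // jumps into the otherwise unreachable block
    if (0) {      // constant-false condition
    target:       // the label keeps the block alive
      puts("reached via goto");
    }
  }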
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width, int64_t value)
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d)
mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee, llvm::ArrayRef< mlir::Value > args={})
void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::LogicalResult emitOMPParallelForDirective(const OMPParallelForDirective &s)
RValue emitCoawaitExpr(const CoawaitExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
void emitCXXThrowExpr(const CXXThrowExpr *e)
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
mlir::LogicalResult emitOMPDistributeDirective(const OMPDistributeDirective &s)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &s)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &s)
llvm::SmallVector< VPtr, 4 > VPtrsVector
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
bool sawAsmBlock
Whether or not a Microsoft-style asm block has been processed within this function.
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s)
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
EHScopeStack::stable_iterator currentCleanupStackDepth
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
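A hypothetical C++ input exercising this path, an array whose elements each require a constructor call:

  struct Elem {
    Elem() {} // non-trivial default constructor
  };
  void init() {
    Elem arr[16]; // typically lowered as a loop invoking Elem::Elem() per element
  }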
void pushFullExprCleanup(CleanupKind kind, As... a)
Push a cleanup to be run at the end of the current full-expression.
void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param, clang::SourceLocation loc)
We are performing a delegate call; that is, the current function is delegating to another one.
mlir::LogicalResult emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
void populateCatchHandlers(cir::TryOp tryOp)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if lhs is marked _Nonnull...
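A hypothetical source pattern this targets; such a check is typically only emitted under the nullability sanitizer (e.g. -fsanitize=nullability-assign):

  int *_Nonnull gp;  // destination annotated _Nonnull
  void store(int *p) {
    gp = p;          // a run-time non-null test of 'p' may be emitted here
  }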
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
std::optional< mlir::Value > emitAArch64SVEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr)
void emitOMPCapturedExpr(const OMPCapturedExprDecl &d)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to have the same ...
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &s)
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth)
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s)
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates an alloca and inserts it into the entry block of the current region.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &s)
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s)
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor, const FunctionArgList &args)
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, mlir::OpBuilder::InsertPoint ip, mlir::Value arraySize=nullptr)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s)
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s)
void popCleanupBlock()
Pops a cleanup block.
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
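A self-contained, hypothetical variadic function showing the source constructs that map onto the va_* helpers above (va_start, va_arg, va_end):

  #include <cstdarg>
  int sum(int count, ...) {
    va_list ap;
    va_start(ap, count);        // handled by the va_start emission
    int total = 0;
    for (int i = 0; i < count; ++i)
      total += va_arg(ap, int); // each read goes through the va_arg lowering
    va_end(ap);                 // handled by the va_end emission
    return total;
  }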
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
void emitAutoVarCleanups(const AutoVarEmission &emission)
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:48
A saved depth on the scope stack.
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:191
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:254
Represents a call to a C++ constructor.
Definition ExprCXX.h:1548
Represents a C++ constructor within a class.
Definition DeclCXX.h:2604
Represents a C++ base or member initializer.
Definition DeclCXX.h:2369
A default argument (C++ [dcl.fct.default]).
Definition ExprCXX.h:1270
A use of a default initializer in a constructor or in aggregate initialization.
Definition ExprCXX.h:1377
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2626
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:481
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:179
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2129
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2355
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:84
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2745
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
Represents a C++ temporary.
Definition ExprCXX.h:1459
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1208
CXXTryStmt - A C++ try block, including all handlers.
Definition StmtCXX.h:69
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
CaseStmt - Represent a case statement.
Definition Stmt.h:1910
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
Represents a 'co_await' expression.
Definition ExprCXX.h:5369
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4234
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3539
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1730
ContinueStmt - This represents a continue.
Definition Stmt.h:3109
Represents the body of a coroutine.
Definition StmtCXX.h:320
SourceLocExprScopeGuard(const Expr *DefaultExpr, CurrentSourceLocExprScope &Current)
Represents the current source location and context used to determine the value of the source location...
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1621
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2822
This represents one expression.
Definition Expr.h:112
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6498
Represents a member of a struct/union/class.
Definition Decl.h:3160
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2878
Represents a function declaration or definition.
Definition Decl.h:2000
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5269
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2959
IfStmt - This represents an if/then/else.
Definition Stmt.h:2249
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:2998
Represents the declaration of a label.
Definition Decl.h:524
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2136
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3298
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3654
Represents a C++ nested name specifier, such as "::std::vector<int>::".
ObjCMethodDecl - Represents an instance or class method declaration.
Definition DeclObjC.h:140
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
Represents a parameter to a function.
Definition Decl.h:1790
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition Expr.h:2005
A (possibly-)qualified type.
Definition TypeBase.h:937
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8333
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3150
Encodes a location in the source.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:85
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2499
Exposes information about the current target.
Definition TargetInfo.h:226
Represents a declaration of a type.
Definition Decl.h:3513
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4891
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3967
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2687
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
OpenACCDirectiveKind
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
OpenACCComputeConstruct(OpenACCDirectiveKind K, SourceLocation Start, SourceLocation DirectiveLoc, SourceLocation End, ArrayRef< const OpenACCClause * > Clauses, Stmt *StructuredBlock)
const FunctionProtoType * T
CXXDtorType
C++ destructor types.
Definition ABI.h:34
static bool aggValueSlot()
static bool peepholeProtection()
static bool opAllocaEscapeByReference()
static bool generateDebugInfo()
AutoVarEmission(const clang::VarDecl &variable)
bool isEscapingByRef
True if the variable is a __block variable that is captured by an escaping block.
Address addr
The address of the alloca for languages with explicit address space (e.g.
bool emittedAsOffload
True if the variable was emitted as an offload recipe, and thus doesn't have the same sort of alloca ...
bool isConstantAggregate
True if the variable is of aggregate type and has a constant initializer.
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
Address getObjectAddress(CIRGenFunction &cgf) const
Returns the address of the object within this declaration.
std::unique_ptr< CGCoroData > data
CXXDefaultArgExprScope(CIRGenFunction &cfg, const CXXDefaultArgExpr *e)
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
mlir::Block * createCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc)
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
void updateRetLoc(mlir::Block *b, mlir::Location loc)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateCleanupBlock(mlir::OpBuilder &builder)
mlir::Location getRetLoc(mlir::Block *b)
llvm::PointerUnion< const clang::FunctionProtoType *, const clang::ObjCMethodDecl * > p
PrototypeWrapper(const clang::ObjCMethodDecl *md)
PrototypeWrapper(const clang::FunctionProtoType *ft)
const clang::CXXRecordDecl * vtableClass
const clang::CXXRecordDecl * nearestVBase
VlaSizePair(mlir::Value num, QualType ty)