CIRGenFunction.h
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
14#define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
15
16#include "CIRGenBuilder.h"
17#include "CIRGenCall.h"
18#include "CIRGenModule.h"
19#include "CIRGenTypeCache.h"
20#include "CIRGenValue.h"
21#include "EHScopeStack.h"
22
23#include "Address.h"
24
27#include "clang/AST/CharUnits.h"
29#include "clang/AST/Decl.h"
30#include "clang/AST/ExprCXX.h"
31#include "clang/AST/Stmt.h"
32#include "clang/AST/Type.h"
37#include "llvm/ADT/ScopedHashTable.h"
38
39namespace {
40class ScalarExprEmitter;
41} // namespace
42
43namespace mlir {
44namespace acc {
45class LoopOp;
46} // namespace acc
47} // namespace mlir
48
49namespace clang::CIRGen {
50
51struct CGCoroData;
52
53class CIRGenFunction : public CIRGenTypeCache {
54public:
55 CIRGenModule &cgm;
56
57private:
58 friend class ::ScalarExprEmitter;
59 /// The builder is a helper class to create IR inside a function. The
60 /// builder is stateful, in particular it keeps an "insertion point": this
61 /// is where the next operations will be introduced.
62 CIRGenBuilderTy &builder;
63
64 /// A jump destination is an abstract label, branching to which may
65 /// require a jump out through normal cleanups.
66 struct JumpDest {
67 JumpDest() = default;
68 JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {},
69 unsigned index = 0)
70 : block(block) {}
71
72 bool isValid() const { return block != nullptr; }
73 mlir::Block *getBlock() const { return block; }
74 EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; }
75 unsigned getDestIndex() const { return index; }
76
77 // This should be used cautiously.
78 void setScopeDepth(EHScopeStack::stable_iterator depth) {
79 scopeDepth = depth;
80 }
81
82 private:
83 mlir::Block *block = nullptr;
85 unsigned index;
86 };
87
88public:
89 /// The GlobalDecl for the current function being compiled or the global
90 /// variable currently being initialized.
92
93 /// Unified return block.
94 /// In CIR this is a function because each scope might have
95 /// its associated return block.
96 JumpDest returnBlock(mlir::Block *retBlock) {
97 return getJumpDestInCurrentScope(retBlock);
98 }
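  // Illustrative usage sketch (not part of the upstream header): a return is
  // typically routed through the per-scope return block and any pending
  // cleanups. Here `cgf` is the current CIRGenFunction and `loc` an
  // mlir::Location; both are assumed for the example.
  //
  // \code
  //   mlir::Block *retBlock = cgf.curLexScope->getOrCreateRetBlock(cgf, loc);
  //   cgf.emitBranchThroughCleanup(loc, cgf.returnBlock(retBlock));
  // \endcode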
99
101
102 /// The compiler-generated variable that holds the return value.
103 std::optional<mlir::Value> fnRetAlloca;
104
105 // Holds coroutine data if the current function is a coroutine. We use a
106 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
107 // in this header.
108 struct CGCoroInfo {
109 std::unique_ptr<CGCoroData> data;
110 CGCoroInfo();
111 ~CGCoroInfo();
112 };
113 CGCoroInfo curCoro;
114
115 bool isCoroutine() const { return curCoro.data != nullptr; }
116
117 /// The temporary alloca to hold the return value. This is
118 /// invalid iff the function has no return value.
120
121 /// Tracks function scope overall cleanup handling.
122 EHScopeStack ehStack;
123
125
126 /// A mapping from NRVO variables to the flags used to indicate
127 /// when the NRVO has been applied to this variable.
128 llvm::DenseMap<const VarDecl *, mlir::Value> nrvoFlags;
129
130 llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
133
134 /// CXXThisDecl - When generating code for a C++ member function,
135 /// this will hold the implicit 'this' declaration.
137 mlir::Value cxxabiThisValue = nullptr;
138 mlir::Value cxxThisValue = nullptr;
140
141 /// When generating code for a constructor or destructor, this will hold the
142 /// implicit argument (e.g. VTT).
145
146 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within this
147 /// expression.
149
150 // Holds the Decl for the current outermost non-closure context
151 const clang::Decl *curFuncDecl = nullptr;
152 /// This is the inner-most code context, which includes blocks.
153 const clang::Decl *curCodeDecl = nullptr;
154
155 /// The current function or global initializer that code is being generated for.
156 /// This is usually a cir::FuncOp, but it can also be a cir::GlobalOp for
157 /// global initializers.
158 mlir::Operation *curFn = nullptr;
159
160 /// Save Parameter Decl for coroutine.
162
163 using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
164 /// This keeps track of the CIR allocas or globals for local C
165 /// declarations.
167
168 /// The type of the condition for the switch statement currently being emitted.
170
171 clang::ASTContext &getContext() const { return cgm.getASTContext(); }
172
173 CIRGenBuilderTy &getBuilder() { return builder; }
174
176 const CIRGenModule &getCIRGenModule() const { return cgm; }
177
179 // We currently assume this isn't called for a global initializer.
180 auto fn = mlir::cast<cir::FuncOp>(curFn);
181 return &fn.getRegion().front();
182 }
183
184 /// Sanitizers enabled for this function.
186
187 /// The symbol table maps a variable name to a value in the current scope.
188 /// Entering a function creates a new scope, and the function arguments are
189 /// added to the mapping. When the processing of a function is terminated,
190 /// the scope is destroyed and the mappings created in this scope are
191 /// dropped.
192 using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
194
195 /// Whether a cir.stacksave operation has been added. Used to avoid
196 /// inserting cir.stacksave for multiple VLAs in the same scope.
197 bool didCallStackSave = false;
198
199 /// Whether or not a Microsoft-style asm block has been processed within
200 /// this function. These can potentially set the return value.
201 bool sawAsmBlock = false;
202
203 mlir::Type convertTypeForMem(QualType t);
204
205 mlir::Type convertType(clang::QualType t);
206 mlir::Type convertType(const TypeDecl *t) {
207 return convertType(getContext().getTypeDeclType(t));
208 }
209
210 /// Get a sign-extended integer from a mlir::Value that is an int constant or a constant op.
211 static int64_t getSExtIntValueFromConstOp(mlir::Value val) {
212 auto constOp = val.getDefiningOp<cir::ConstantOp>();
213 assert(constOp && "getSExtIntValueFromConstOp call with non ConstantOp");
214 return constOp.getIntValue().getSExtValue();
215 }
216
217 /// Get zero-extended integer from a mlir::Value that is an int constant or a
218 /// constant op.
219 static int64_t getZExtIntValueFromConstOp(mlir::Value val) {
220 auto constOp = val.getDefiningOp<cir::ConstantOp>();
221 assert(constOp && "getZExtIntValueFromConstOp call with non ConstantOp");
222 return constOp.getIntValue().getZExtValue();
223 }
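  // Illustrative usage sketch (not part of the upstream header): reading back
  // an integer that was emitted as a cir::ConstantOp. `cgf` and a scalar
  // expression `e` known to fold to a constant are assumed.
  //
  // \code
  //   mlir::Value v = cgf.emitScalarExpr(e);
  //   int64_t n = CIRGenFunction::getSExtIntValueFromConstOp(v);
  // \endcode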
224
225 /// Return the cir::TypeEvaluationKind of QualType \c type.
227
231
235
237 bool suppressNewContext = false);
239
240 CIRGenTypes &getTypes() const { return cgm.getTypes(); }
241
242 const TargetInfo &getTarget() const { return cgm.getTarget(); }
243 mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }
244
246 return cgm.getTargetCIRGenInfo();
247 }
248
249 // ---------------------
250 // Opaque value handling
251 // ---------------------
252
253 /// Keeps track of the current set of opaque value expressions.
254 llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
255 llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
256
257 // This keeps track of the associated size for each VLA type.
258 // We track this by the size expression rather than the type itself because
259 // in certain situations, like a const qualifier applied to a VLA typedef,
260 // multiple VLA types can share the same size expression.
261 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
262 // enter/leave scopes.
263 llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
264
265public:
266 /// A non-RAII class containing all the information about a bound
267 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
268 /// this which makes individual mappings very simple; using this
269 /// class directly is useful when you have a variable number of
270 /// opaque values or don't want the RAII functionality for some
271 /// reason.
272 class OpaqueValueMappingData {
273 const OpaqueValueExpr *opaqueValue;
274 bool boundLValue;
275
276 OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
277 : opaqueValue(ov), boundLValue(boundLValue) {}
278
279 public:
280 OpaqueValueMappingData() : opaqueValue(nullptr) {}
281
282 static bool shouldBindAsLValue(const Expr *expr) {
283 // gl-values should be bound as l-values for obvious reasons.
284 // Records should be bound as l-values because IR generation
285 // always keeps them in memory. Expressions of function type
286 // act exactly like l-values but are formally required to be
287 // r-values in C.
288 return expr->isGLValue() || expr->getType()->isFunctionType() ||
290 }
291
293 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
294 if (shouldBindAsLValue(ov))
295 return bind(cgf, ov, cgf.emitLValue(e));
296 return bind(cgf, ov, cgf.emitAnyExpr(e));
297 }
298
300 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
301 assert(shouldBindAsLValue(ov));
302 cgf.opaqueLValues.insert(std::make_pair(ov, lv));
303 return OpaqueValueMappingData(ov, true);
304 }
305
307 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
308 assert(!shouldBindAsLValue(ov));
309 cgf.opaqueRValues.insert(std::make_pair(ov, rv));
310
311 OpaqueValueMappingData data(ov, false);
312
313 // Work around an extremely aggressive peephole optimization in
314 // EmitScalarConversion which assumes that all other uses of a
315 // value are extant.
317 return data;
318 }
319
320 bool isValid() const { return opaqueValue != nullptr; }
321 void clear() { opaqueValue = nullptr; }
322
324 assert(opaqueValue && "no data to unbind!");
325
326 if (boundLValue) {
327 cgf.opaqueLValues.erase(opaqueValue);
328 } else {
329 cgf.opaqueRValues.erase(opaqueValue);
331 }
332 }
333 };
334
335 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
337 CIRGenFunction &cgf;
339
340 public:
344
345 /// Build the opaque value mapping for the given conditional
346 /// operator if it's the GNU ?: extension. This is a common
347 /// enough pattern that the convenience operator is really
348 /// helpful.
349 ///
352 : cgf(cgf) {
353 if (mlir::isa<ConditionalOperator>(op))
354 // Leave Data empty.
355 return;
356
358 mlir::cast<BinaryConditionalOperator>(op);
360 e->getCommon());
361 }
362
363 /// Build the opaque value mapping for an OpaqueValueExpr whose source
364 /// expression is set to the expression the OVE represents.
366 : cgf(cgf) {
367 if (ov) {
368 assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
369 "for OVE with no source expression");
370 data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
371 }
372 }
373
375 LValue lvalue)
376 : cgf(cgf),
377 data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}
378
380 RValue rvalue)
381 : cgf(cgf),
382 data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}
383
384 void pop() {
385 data.unbind(cgf);
386 data.clear();
387 }
388
390 if (data.isValid())
391 data.unbind(cgf);
392 }
393 };
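  // Illustrative usage sketch (not part of the upstream header): RAII-binding
  // an OpaqueValueExpr to its source expression before emitting code that
  // refers to it, e.g. for the GNU `?:` extension. `cgf` is the current
  // CIRGenFunction and `e` a BinaryConditionalOperator; both are assumed.
  //
  // \code
  //   CIRGenFunction::OpaqueValueMapping binding(cgf, e->getOpaqueValue());
  //   mlir::Value cond = cgf.evaluateExprAsBool(e->getCond());
  //   // ... emit the true/false arms; the mapping is unbound when `binding`
  //   // goes out of scope.
  // \endcode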
394
395private:
396 /// Declare a variable in the current scope, return success if the variable
397 /// wasn't declared yet.
398 void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
399 mlir::Location loc, clang::CharUnits alignment,
400 bool isParam = false);
401
402public:
403 mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);
404
405 void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);
406
407private:
408 // Track current variable initialization (if there's one)
409 const clang::VarDecl *currVarDecl = nullptr;
410 class VarDeclContext {
412 const clang::VarDecl *oldVal = nullptr;
413
414 public:
415 VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) {
416 if (p.currVarDecl)
417 oldVal = p.currVarDecl;
418 p.currVarDecl = value;
419 }
420
421 /// Can be used to restore the state early, before the dtor
422 /// is run.
423 void restore() { p.currVarDecl = oldVal; }
424 ~VarDeclContext() { restore(); }
425 };
426
427public:
428 /// Use to track source locations across nested visitor traversals.
429 /// Always use a `SourceLocRAIIObject` to change currSrcLoc.
430 std::optional<mlir::Location> currSrcLoc;
432 CIRGenFunction &cgf;
433 std::optional<mlir::Location> oldLoc;
434
435 public:
436 SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value) : cgf(cgf) {
437 if (cgf.currSrcLoc)
438 oldLoc = cgf.currSrcLoc;
439 cgf.currSrcLoc = value;
440 }
441
442 /// Can be used to restore the state early, before the dtor
443 /// is run.
444 void restore() { cgf.currSrcLoc = oldLoc; }
446 };
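  // Illustrative usage sketch (not part of the upstream header): scoping
  // currSrcLoc while visiting a sub-expression. `cgf` is the current
  // CIRGenFunction and `e` a clang::Expr; both are assumed.
  //
  // \code
  //   {
  //     CIRGenFunction::SourceLocRAIIObject scopedLoc(
  //         cgf, cgf.getLoc(e->getSourceRange()));
  //     cgf.emitIgnoredExpr(e);
  //   } // the previous currSrcLoc is restored here
  // \endcode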
447
449 llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
450
451 /// Hold counters for incrementally naming temporaries
452 unsigned counterRefTmp = 0;
453 unsigned counterAggTmp = 0;
454 std::string getCounterRefTmpAsString();
455 std::string getCounterAggTmpAsString();
456
457 /// Helpers to convert Clang's SourceLocation to a MLIR Location.
458 mlir::Location getLoc(clang::SourceLocation srcLoc);
459 mlir::Location getLoc(clang::SourceRange srcLoc);
460 mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
461
462 const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
463
464 /// True if an insertion point is defined. If not, this indicates that the
465 /// current code being emitted is unreachable.
466 /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
467 /// since we don't yet force null insertion point to designate behavior (like
468 /// LLVM's codegen does) and we probably shouldn't.
469 bool haveInsertPoint() const {
470 return builder.getInsertionBlock() != nullptr;
471 }
472
473 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
474 // an ObjCMethodDecl.
476 llvm::PointerUnion<const clang::FunctionProtoType *,
477 const clang::ObjCMethodDecl *>
479
482 };
483
485
486 /// An abstract representation of regular/ObjC call/message targets.
488 /// The function declaration of the callee.
489 [[maybe_unused]] const clang::Decl *calleeDecl;
490
491 public:
492 AbstractCallee() : calleeDecl(nullptr) {}
493 AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {}
494
495 bool hasFunctionDecl() const {
496 return llvm::isa_and_nonnull<clang::FunctionDecl>(calleeDecl);
497 }
498
499 unsigned getNumParams() const {
500 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
501 return fd->getNumParams();
502 return llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_size();
503 }
504
505 const clang::ParmVarDecl *getParamDecl(unsigned I) const {
506 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
507 return fd->getParamDecl(I);
508 return *(llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_begin() +
509 I);
510 }
511 };
512
513 struct VlaSizePair {
514 mlir::Value numElts;
516
517 VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
518 };
519
520 /// Return the number of elements for a single dimension
521 /// for the given array type.
522 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
523
524 /// Returns an mlir::Value+QualType pair that corresponds to the size,
525 /// in non-variably-sized elements, of a variable length array type,
526 /// plus that largest non-variably-sized element type. Assumes that
527 /// the type has already been emitted with emitVariablyModifiedType.
528 VlaSizePair getVLASize(const VariableArrayType *type);
529 VlaSizePair getVLASize(QualType type);
530
532
533 mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType) {
534 return getAsNaturalAddressOf(addr, pointeeType).getBasePointer();
535 }
536
537 void finishFunction(SourceLocation endLoc);
538
539 /// Determine whether the given initializer is trivial in the sense
540 /// that it requires no code to be generated.
541 bool isTrivialInitializer(const Expr *init);
542
543 /// If the specified expression does not fold to a constant, or if it does but
544 /// contains a label, return false. If it constant folds, return true and set
545 /// the result in resultBool (or resultInt).
546 bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
547 bool allowLabels = false);
549 llvm::APSInt &resultInt,
550 bool allowLabels = false);
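  // Illustrative usage sketch (not part of the upstream header): skipping a
  // statically dead branch. `cgf`, the condition `cond`, and the then-branch
  // statement `thenStmt` are assumed.
  //
  // \code
  //   bool condValue;
  //   if (cgf.constantFoldsToBool(cond, condValue) && !condValue &&
  //       !cgf.containsLabel(thenStmt))
  //     return; // no code needs to be emitted for the then-branch
  // \endcode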
551
552 /// Return true if the statement contains a label in it. If
553 /// this statement is not executed normally, the absence of a label means
554 /// that we can just remove the code.
555 bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);
556
557 Address emitExtVectorElementLValue(LValue lv, mlir::Location loc);
558
559 class ConstantEmission {
560 // Cannot use mlir::TypedAttr directly here because of bit availability.
561 llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference;
562 ConstantEmission(mlir::TypedAttr c, bool isReference)
563 : valueAndIsReference(c, isReference) {}
564
565 public:
567 static ConstantEmission forReference(mlir::TypedAttr c) {
568 return ConstantEmission(c, true);
569 }
570 static ConstantEmission forValue(mlir::TypedAttr c) {
571 return ConstantEmission(c, false);
572 }
573
574 explicit operator bool() const {
575 return valueAndIsReference.getOpaqueValue() != nullptr;
576 }
577
578 bool isReference() const { return valueAndIsReference.getInt(); }
580 assert(isReference());
581 cgf.cgm.errorNYI(refExpr->getSourceRange(),
582 "ConstantEmission::getReferenceLValue");
583 return {};
584 }
585
586 mlir::TypedAttr getValue() const {
587 assert(!isReference());
588 return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer());
589 }
590 };
591
592 ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
593 ConstantEmission tryEmitAsConstant(const MemberExpr *me);
594
597 /// The address of the alloca for languages with explicit address space
598 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
599 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
600 /// as a global constant.
602
603 /// True if the variable is of aggregate type and has a constant
604 /// initializer.
606
607 /// True if the variable is a __block variable that is captured by an
608 /// escaping block.
609 bool isEscapingByRef = false;
610
611 /// True if the variable was emitted as an offload recipe, and thus doesn't
612 /// have the same sort of alloca initialization.
613 bool emittedAsOffload = false;
614
615 mlir::Value nrvoFlag{};
616
617 struct Invalid {};
619
622
624
625 bool wasEmittedAsGlobal() const { return !addr.isValid(); }
626
628
629 /// Returns the raw, allocated address, which is not necessarily
630 /// the address of the object itself. It is casted to default
631 /// address space for address space agnostic languages.
632 Address getAllocatedAddress() const { return addr; }
633
634 // Changes the stored address for the emission. This function should only
635 // be used in extreme cases, and isn't required to model normal AST
636 // initialization/variables.
638
639 /// Returns the address of the object within this declaration.
640 /// Note that this does not chase the forwarding pointer for
641 /// __block decls.
643 if (!isEscapingByRef)
644 return addr;
645
647 return Address::invalid();
648 }
649 };
650
651 /// The given basic block lies in the current EH scope, but may be a
652 /// target of a potentially scope-crossing jump; get a stable handle
653 /// to which we can perform this jump later.
654 /// CIRGen: this mostly tracks state for figuring out the proper scope
655 /// information, no actual branches are emitted.
656 JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
657 return JumpDest(target, ehStack.getInnermostNormalCleanup(),
659 }
660 /// IndirectBranch - The first time an indirect goto is seen we create a block
661 /// reserved for the indirect branch. Unlike before, the actual 'indirectbr'
662 /// is emitted at the end of the function, once all block destinations have
663 /// been resolved.
664 mlir::Block *indirectGotoBlock = nullptr;
665
668
669 /// Perform the usual unary conversions on the specified expression and
670 /// compare the result against zero, returning an Int1Ty value.
671 mlir::Value evaluateExprAsBool(const clang::Expr *e);
672
673 cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d,
674 cir::GlobalOp gv,
675 cir::GetGlobalOp gvAddr);
676
677 /// Enter the cleanups necessary to complete the given phase of destruction
678 /// for a destructor. The end result should call destructors on members and
679 /// base classes in reverse order of their construction.
681
682 /// Determines whether an EH cleanup is required to destroy a type
683 /// with the given destruction kind.
684 /// TODO(cir): could be shared with Clang LLVM codegen
686 switch (kind) {
688 return false;
692 return getLangOpts().Exceptions;
694 return getLangOpts().Exceptions &&
695 cgm.getCodeGenOpts().ObjCAutoRefCountExceptions;
696 }
697 llvm_unreachable("bad destruction kind");
698 }
699
703
704 void pushStackRestore(CleanupKind kind, Address spMem);
705
706 /// Set the address of a local variable.
708 assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
709 localDeclMap.insert({vd, addr});
710
711 // Add to the symbol table if not there already.
712 if (symbolTable.count(vd))
713 return;
714 symbolTable.insert(vd, addr.getPointer());
715 }
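  // Illustrative usage sketch (not part of the upstream header): creating an
  // alloca for a local VarDecl and registering its address. `cgf`, a
  // `const VarDecl *vd`, and an mlir::Location `loc` are assumed.
  //
  // \code
  //   mlir::Type memTy = cgf.convertTypeForMem(vd->getType());
  //   clang::CharUnits align = cgf.getContext().getDeclAlign(vd);
  //   mlir::Value allocaPtr = cgf.emitAlloca(vd->getName(), memTy, loc, align,
  //                                          /*insertIntoFnEntryBlock=*/true);
  //   cgf.setAddrOfLocalVar(vd, Address(allocaPtr, memTy, align));
  // \endcode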
716
717 // Replaces the address of the local variable if it exists; otherwise does the
718 // same thing as setAddrOfLocalVar.
720 localDeclMap.insert_or_assign(vd, addr);
721 }
722
723 // A class to allow reverting changes to a var-decl's registration to the
724 // localDeclMap. This is used in cases where things are being inserted into
725 // the variable list but don't follow normal lookup/search rules, like in
726 // OpenACC recipe generation.
728 CIRGenFunction &cgf;
729 const VarDecl *vd;
730 bool shouldDelete = false;
731 Address oldAddr = Address::invalid();
732
733 public:
735 : cgf(cgf), vd(vd) {
736 auto mapItr = cgf.localDeclMap.find(vd);
737
738 if (mapItr != cgf.localDeclMap.end())
739 oldAddr = mapItr->second;
740 else
741 shouldDelete = true;
742 }
743
745 if (shouldDelete)
746 cgf.localDeclMap.erase(vd);
747 else
748 cgf.localDeclMap.insert_or_assign(vd, oldAddr);
749 }
750 };
751
753
756
757 static bool
759
766
769
773 const clang::CXXRecordDecl *nearestVBase,
774 clang::CharUnits offsetFromNearestVBase,
775 bool baseIsNonVirtualPrimaryBase,
776 const clang::CXXRecordDecl *vtableClass,
777 VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
778 /// Return the Value of the vtable pointer member pointed to by thisAddr.
779 mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
780 const clang::CXXRecordDecl *vtableClass);
781
782 /// Returns whether we should perform a type checked load when loading a
783 /// virtual function for virtual calls to members of RD. This is generally
784 /// true when both vcall CFI and whole-program-vtables are enabled.
786
787 /// Source location information about the default argument or member
788 /// initializer expression we're evaluating, if any.
792
793 /// A scope within which we are constructing the fields of an object which
794 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
795 /// we need to evaluate the CXXDefaultInitExpr within the evaluation.
797 public:
799 : cgf(cgf), oldCXXDefaultInitExprThis(cgf.cxxDefaultInitExprThis) {
800 cgf.cxxDefaultInitExprThis = thisAddr;
801 }
803 cgf.cxxDefaultInitExprThis = oldCXXDefaultInitExprThis;
804 }
805
806 private:
807 CIRGenFunction &cgf;
808 Address oldCXXDefaultInitExprThis;
809 };
810
811 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
812 /// is overridden to be the object under construction.
814 public:
819 cgf.cxxThisValue = cgf.cxxDefaultInitExprThis.getPointer();
820 cgf.cxxThisAlignment = cgf.cxxDefaultInitExprThis.getAlignment();
821 }
823 cgf.cxxThisValue = oldCXXThisValue;
824 cgf.cxxThisAlignment = oldCXXThisAlignment;
825 }
826
827 public:
829 mlir::Value oldCXXThisValue;
832 };
833
838
840 LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
841
842 /// Construct an address with the natural alignment of T. If a pointer to T
843 /// is expected to be signed, the pointer passed to this function must have
844 /// been signed, and the returned Address will have the pointer authentication
845 /// information needed to authenticate the signed pointer.
847 CharUnits alignment,
848 bool forPointeeType = false,
849 LValueBaseInfo *baseInfo = nullptr) {
850 if (alignment.isZero())
851 alignment = cgm.getNaturalTypeAlignment(t, baseInfo);
852 return Address(ptr, convertTypeForMem(t), alignment);
853 }
854
856 Address value, const CXXRecordDecl *derived,
857 llvm::iterator_range<CastExpr::path_const_iterator> path,
858 bool nullCheckValue, SourceLocation loc);
859
861 mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived,
862 llvm::iterator_range<CastExpr::path_const_iterator> path,
863 bool nullCheckValue);
864
865 /// Return the VTT parameter that should be passed to a base
866 /// constructor/destructor with virtual bases.
867 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
868 /// to ItaniumCXXABI.cpp together with all the references to VTT.
869 mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase,
870 bool delegating);
871
874 return makeAddrLValue(addr, ty, LValueBaseInfo(source));
875 }
876
878 return LValue::makeAddr(addr, ty, baseInfo);
879 }
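  // Illustrative usage sketch (not part of the upstream header): turning a raw
  // pointer value into an Address/LValue with its natural alignment and
  // storing a scalar through it. `cgf`, the pointer `ptr`, its pointee type
  // `ty`, and the scalar `val` are assumed, as is the AlignmentSource::Type
  // enumerator.
  //
  // \code
  //   Address addr = cgf.makeNaturalAddressForPointer(
  //       ptr, ty, clang::CharUnits::Zero(), /*forPointeeType=*/true);
  //   LValue lv = cgf.makeAddrLValue(addr, ty, AlignmentSource::Type);
  //   cgf.emitStoreOfScalar(val, lv, /*isInit=*/false);
  // \endcode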
880
881 void initializeVTablePointers(mlir::Location loc,
882 const clang::CXXRecordDecl *rd);
883 void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
884
886
887 /// Return the address of a local variable.
889 auto it = localDeclMap.find(vd);
890 assert(it != localDeclMap.end() &&
891 "Invalid argument to getAddrOfLocalVar(), no decl!");
892 return it->second;
893 }
894
896 mlir::Type fieldType, unsigned index);
897
898 /// Given an opaque value expression, return its LValue mapping if it exists,
899 /// otherwise create one.
901
902 /// Given an opaque value expression, return its RValue mapping if it exists,
903 /// otherwise create one.
905
906 /// Load the value for 'this'. This function is only valid while generating
907 /// code for a C++ member function.
908 /// FIXME(cir): this should return a mlir::Value!
909 mlir::Value loadCXXThis() {
910 assert(cxxThisValue && "no 'this' value for this function");
911 return cxxThisValue;
912 }
914
915 /// Load the VTT parameter for base constructors/destructors that have virtual
916 /// bases. FIXME: Every place that calls LoadCXXVTT is something that needs to
917 /// be abstracted properly.
918 mlir::Value loadCXXVTT() {
919 assert(cxxStructorImplicitParamValue && "no VTT value for this function");
921 }
922
923 /// Convert the given pointer to a complete class to the given direct base.
925 Address value,
926 const CXXRecordDecl *derived,
927 const CXXRecordDecl *base,
928 bool baseIsVirtual);
929
930 /// Determine whether a return value slot may overlap some other object.
932 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
933 // class subobjects. These cases may need to be revisited depending on the
934 // resolution of the relevant core issue.
936 }
937
938 /// Determine whether a base class initialization may overlap some other
939 /// object.
941 const CXXRecordDecl *baseRD,
942 bool isVirtual);
943
944 /// Get an appropriate 'undef' rvalue for the given type.
945 /// TODO: What's the equivalent for MLIR? Currently we're only using this for
946 /// void types so it just returns RValue::get(nullptr), but it'll need to be
947 /// addressed later.
949
950 cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
951 cir::FuncType funcType);
952
954 FunctionArgList &args);
955
956 /// Emit the function prologue: declare function arguments in the symbol
957 /// table.
958 void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB,
959 const FunctionDecl *fd, SourceLocation bodyBeginLoc);
960
961 /// Emit code for the start of a function.
962 /// \param loc The location to be associated with the function.
963 /// \param startLoc The location of the function body.
965 cir::FuncOp fn, cir::FuncType funcType,
967 clang::SourceLocation startLoc);
968
969 /// Returns true if the aggregate type has a volatile member.
971 if (const auto *rd = t->getAsRecordDecl())
972 return rd->hasVolatileMember();
973 return false;
974 }
975
976 void populateUnwindResumeBlock(bool isCleanup, cir::TryOp tryOp);
978 cir::TryOp tryOp);
979
980 /// The cleanup depth enclosing all the cleanups associated with the
981 /// parameters.
983
985 void populateCatchHandlersIfRequired(cir::TryOp tryOp);
986
987 /// Takes the old cleanup stack size and emits the cleanup blocks
988 /// that have been added.
989 void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth);
990 void popCleanupBlock();
991
992 /// Push a cleanup to be run at the end of the current full-expression. Safe
993 /// against the possibility that we're currently inside a
994 /// conditionally-evaluated expression.
995 template <class T, class... As>
996 void pushFullExprCleanup(CleanupKind kind, As... a) {
997 // If we're not in a conditional branch, or if none of the
998 // arguments requires saving, then use the unconditional cleanup.
1000 return ehStack.pushCleanup<T>(kind, a...);
1001
1002 cgm.errorNYI("pushFullExprCleanup in conditional branch");
1003 }
1004
1005 /// Enters a new scope for capturing cleanups, all of which
1006 /// will be executed once the scope is exited.
1007 class RunCleanupsScope {
1008 EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
1009
1010 protected:
1013
1014 private:
1015 RunCleanupsScope(const RunCleanupsScope &) = delete;
1016 void operator=(const RunCleanupsScope &) = delete;
1017
1018 protected:
1020
1021 public:
1022 /// Enter a new cleanup scope.
1024 : performCleanup(true), cgf(cgf) {
1025 cleanupStackDepth = cgf.ehStack.stable_begin();
1026 oldDidCallStackSave = cgf.didCallStackSave;
1027 cgf.didCallStackSave = false;
1028 oldCleanupStackDepth = cgf.currentCleanupStackDepth;
1029 cgf.currentCleanupStackDepth = cleanupStackDepth;
1030 }
1031
1032 /// Exit this cleanup scope, emitting any accumulated cleanups.
1034 if (performCleanup)
1035 forceCleanup();
1036 }
1037
1038 /// Force the emission of cleanups now, instead of waiting
1039 /// until this object is destroyed.
1041 assert(performCleanup && "Already forced cleanup");
1042 {
1043 mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
1044 cgf.didCallStackSave = oldDidCallStackSave;
1045 cgf.popCleanupBlocks(cleanupStackDepth);
1046 performCleanup = false;
1047 cgf.currentCleanupStackDepth = oldCleanupStackDepth;
1048 }
1049 }
1050 };
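  // Illustrative usage sketch (not part of the upstream header): cleanups
  // pushed onto ehStack while emitting a full-expression are popped when the
  // scope is destroyed (or when forceCleanup() is called). `cgf` and a
  // `const Expr *e` are assumed.
  //
  // \code
  //   {
  //     CIRGenFunction::RunCleanupsScope cleanups(cgf);
  //     cgf.emitIgnoredExpr(e); // may push cleanups
  //   } // accumulated cleanups are emitted here
  // \endcode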
1051
1052 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1054
1055public:
1056 /// Represents a scope, including function bodies, compound statements, and
1057 /// the substatements of if/while/do/for/switch/try statements. This class
1058 /// handles any automatic cleanup, along with the return value.
1059 struct LexicalScope : public RunCleanupsScope {
1060 private:
1061 // Block containing cleanup code for things initialized in this
1062 // lexical context (scope).
1063 mlir::Block *cleanupBlock = nullptr;
1064
1065 // Points to the scope entry block. This is useful, for instance, for
1066 // helping to insert allocas before finalizing any recursive CodeGen from
1067 // switches.
1068 mlir::Block *entryBlock;
1069
1070 LexicalScope *parentScope = nullptr;
1071
1072 // Holds the actual value for ScopeKind::Try
1073 cir::TryOp tryOp = nullptr;
1074
1075 // On a coroutine body, the OnFallthrough sub stmt holds the handler
1076 // (CoreturnStmt) for control flow falling off the body. Keep track
1077 // of co_return statements emitted in this scope and allow OnFallthrough to be
1078 // skipped.
1079 bool hasCoreturnStmt = false;
1080
1081 // Only Regular is used at the moment. Support for other kinds will be
1082 // added as the relevant statements/expressions are upstreamed.
1083 enum Kind {
1084 Regular, // cir.if, cir.scope, if_regions
1085 Ternary, // cir.ternary
1086 Switch, // cir.switch
1087 Try, // cir.try
1088 GlobalInit // cir.global initialization code
1089 };
1090 Kind scopeKind = Kind::Regular;
1091
1092 // The scope return value.
1093 mlir::Value retVal = nullptr;
1094
1095 mlir::Location beginLoc;
1096 mlir::Location endLoc;
1097
1098 public:
1099 unsigned depth = 0;
1100
1101 LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
1102 : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
1103 beginLoc(loc), endLoc(loc) {
1104
1105 assert(entryBlock && "LexicalScope requires an entry block");
1106 cgf.curLexScope = this;
1107 if (parentScope)
1108 ++depth;
1109
1110 if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
1111 assert(fusedLoc.getLocations().size() == 2 && "too many locations");
1112 beginLoc = fusedLoc.getLocations()[0];
1113 endLoc = fusedLoc.getLocations()[1];
1114 }
1115 }
1116
1117 void setRetVal(mlir::Value v) { retVal = v; }
1118
1119 void cleanup();
1120 void restore() { cgf.curLexScope = parentScope; }
1121
1124 cleanup();
1125 restore();
1126 }
1127
1128 // ---
1129 // Coroutine tracking
1130 // ---
1131 bool hasCoreturn() const { return hasCoreturnStmt; }
1132 void setCoreturn() { hasCoreturnStmt = true; }
1133
1134 // ---
1135 // Kind
1136 // ---
1137 bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
1138 bool isRegular() { return scopeKind == Kind::Regular; }
1139 bool isSwitch() { return scopeKind == Kind::Switch; }
1140 bool isTernary() { return scopeKind == Kind::Ternary; }
1141 bool isTry() { return scopeKind == Kind::Try; }
1142 cir::TryOp getClosestTryParent();
1143 void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
1144 void setAsSwitch() { scopeKind = Kind::Switch; }
1145 void setAsTernary() { scopeKind = Kind::Ternary; }
1146 void setAsTry(cir::TryOp op) {
1147 scopeKind = Kind::Try;
1148 tryOp = op;
1149 }
1150
1151 // Lazily create the cleanup block, or return the existing one.
1152 mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
1153 if (cleanupBlock)
1154 return cleanupBlock;
1155 cleanupBlock = createCleanupBlock(builder);
1156 return cleanupBlock;
1157 }
1158
1159 cir::TryOp getTry() {
1160 assert(isTry());
1161 return tryOp;
1162 }
1163
1164 mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
1165 return cleanupBlock;
1166 }
1167
1168 mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
1169 // Create the cleanup block but don't hook it up just yet.
1170 mlir::OpBuilder::InsertionGuard guard(builder);
1171 mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
1172 : &cgf.curFn->getRegion(0);
1173 cleanupBlock = builder.createBlock(r);
1174 return cleanupBlock;
1175 }
1176
1177 // ---
1178 // Return handling.
1179 // ---
1180
1181 private:
1182 // On switches we need one return block per region, since cases don't
1183 // have their own scopes but are distinct regions nonetheless.
1184
1185 // TODO: This implementation should change once we have support for early
1186 // exits in MLIR structured control flow (llvm-project#161575)
1188 llvm::DenseMap<mlir::Block *, mlir::Location> retLocs;
1189 llvm::DenseMap<cir::CaseOp, unsigned> retBlockInCaseIndex;
1190 std::optional<unsigned> normalRetBlockIndex;
1191
1192 // There's usually only one ret block per scope, but this needs to be
1193 // get-or-create because of potential unreachable return statements; note
1194 // that for those, all source locations map to the first one found.
1195 mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1196 assert((isa_and_nonnull<cir::CaseOp>(
1197 cgf.builder.getBlock()->getParentOp()) ||
1198 retBlocks.size() == 0) &&
1199 "only switches can hold more than one ret block");
1200
1201 // Create the return block but don't hook it up just yet.
1202 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
1203 auto *b = cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
1204 retBlocks.push_back(b);
1205 updateRetLoc(b, loc);
1206 return b;
1207 }
1208
1209 cir::ReturnOp emitReturn(mlir::Location loc);
1210 void emitImplicitReturn();
1211
1212 public:
1214 mlir::Location getRetLoc(mlir::Block *b) { return retLocs.at(b); }
1215 void updateRetLoc(mlir::Block *b, mlir::Location loc) {
1216 retLocs.insert_or_assign(b, loc);
1217 }
1218
1219 mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1220 // Check if we're inside a case region
1221 if (auto caseOp = mlir::dyn_cast_if_present<cir::CaseOp>(
1222 cgf.builder.getBlock()->getParentOp())) {
1223 auto iter = retBlockInCaseIndex.find(caseOp);
1224 if (iter != retBlockInCaseIndex.end()) {
1225 // Reuse existing return block
1226 mlir::Block *ret = retBlocks[iter->second];
1227 updateRetLoc(ret, loc);
1228 return ret;
1229 }
1230 // Create new return block
1231 mlir::Block *ret = createRetBlock(cgf, loc);
1232 retBlockInCaseIndex[caseOp] = retBlocks.size() - 1;
1233 return ret;
1234 }
1235
1236 if (normalRetBlockIndex) {
1237 mlir::Block *ret = retBlocks[*normalRetBlockIndex];
1238 updateRetLoc(ret, loc);
1239 return ret;
1240 }
1241
1242 mlir::Block *ret = createRetBlock(cgf, loc);
1243 normalRetBlockIndex = retBlocks.size() - 1;
1244 return ret;
1245 }
1246
1247 mlir::Block *getEntryBlock() { return entryBlock; }
1248 };
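  // Illustrative usage sketch (not part of the upstream header): emitting a
  // statement inside its own lexical scope. `cgf`, a location `loc`, the
  // current insertion block `entryBB`, and the statement `stmt` are assumed.
  //
  // \code
  //   {
  //     CIRGenFunction::LexicalScope lexScope(cgf, loc, entryBB);
  //     (void)cgf.emitStmt(stmt, /*useCurrentScope=*/true);
  //   } // cleanup() and restore() run when lexScope is destroyed
  // \endcode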
1249
1251
1252 typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
1253
1255
1256 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
1257 QualType type);
1258
1259 void pushDestroy(CleanupKind kind, Address addr, QualType type,
1260 Destroyer *destroyer);
1261
1263
1264 /// ----------------------
1265 /// CIR emit functions
1266 /// ----------------------
1267public:
1268 std::optional<mlir::Value>
1269 emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
1271 llvm::Triple::ArchType arch);
1272 std::optional<mlir::Value> emitAArch64SMEBuiltinExpr(unsigned builtinID,
1273 const CallExpr *expr);
1274 std::optional<mlir::Value> emitAArch64SVEBuiltinExpr(unsigned builtinID,
1275 const CallExpr *expr);
1276
1277 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
1278 SourceLocation loc,
1279 SourceLocation assumptionLoc,
1280 int64_t alignment,
1281 mlir::Value offsetValue = nullptr);
1282
1283 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
1284 SourceLocation assumptionLoc,
1285 int64_t alignment,
1286 mlir::Value offsetValue = nullptr);
1287
1288private:
1289 void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
1290 clang::CharUnits alignment);
1291
1292 CIRGenCallee emitDirectCallee(const GlobalDecl &gd);
1293
1294public:
1296 llvm::StringRef fieldName,
1297 unsigned fieldIndex);
1298
1299 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1300 mlir::Location loc, clang::CharUnits alignment,
1301 bool insertIntoFnEntryBlock,
1302 mlir::Value arraySize = nullptr);
1303 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1304 mlir::Location loc, clang::CharUnits alignment,
1305 mlir::OpBuilder::InsertPoint ip,
1306 mlir::Value arraySize = nullptr);
1307
1308 void emitAggregateStore(mlir::Value value, Address dest);
1309
1310 void emitAggExpr(const clang::Expr *e, AggValueSlot slot);
1311
1313
1314 /// Emit an aggregate copy.
1315 ///
1316 /// \param isVolatile \c true iff either the source or the destination is
1317 /// volatile.
1318 /// \param mayOverlap Whether the tail padding of the destination might be
1319 /// occupied by some other object. More efficient code can often be
1320 /// generated if not.
1321 void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
1322 AggValueSlot::Overlap_t mayOverlap,
1323 bool isVolatile = false);
1324
1325 /// Emit code to compute the specified expression which can have any type. The
1326 /// result is returned as an RValue struct. If this is an aggregate
1327 /// expression, the aggloc/agglocvolatile arguments indicate where the result
1328 /// should be returned.
1331 bool ignoreResult = false);
1332
1333 /// Emits the code necessary to evaluate an arbitrary expression into the
1334 /// given memory location.
1335 void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals,
1336 bool isInitializer);
1337
1338 /// Similar to emitAnyExpr(), but the result will always be accessible
1339 /// even if no aggregate location is provided.
1341
1342 void emitAnyExprToExn(const Expr *e, Address addr);
1343
1344 void emitArrayDestroy(mlir::Value begin, mlir::Value numElements,
1345 QualType elementType, CharUnits elementAlign,
1346 Destroyer *destroyer);
1347
1348 mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
1349 QualType &baseType, Address &addr);
1351
1353
1355 LValueBaseInfo *baseInfo = nullptr);
1356
1357 mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
1358
1360 void emitAtomicInit(Expr *init, LValue dest);
1361 void emitAtomicStore(RValue rvalue, LValue dest, bool isInit);
1362 void emitAtomicStore(RValue rvalue, LValue dest, cir::MemOrder order,
1363 bool isVolatile, bool isInit);
1365 const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
1366 llvm::function_ref<void(cir::MemOrder)> emitAtomicOp);
1367
1369 mlir::OpBuilder::InsertPoint ip = {});
1370
1371 /// Emit code and set up symbol table for a variable declaration with auto,
1372 /// register, or no storage class specifier. These turn into simple stack
1373 /// objects, or globals depending on the target.
1374 void emitAutoVarDecl(const clang::VarDecl &d);
1375
1376 void emitAutoVarCleanups(const AutoVarEmission &emission);
1377 /// Emit the initializer for an allocated variable. If this call is not
1378 /// associated with the call to emitAutoVarAlloca (as the address of the
1379 /// emission is not directly an alloca), the allocatedSeparately parameter can
1380 /// be used to suppress the assertions. However, this should only be used in
1381 /// extreme cases, as it doesn't properly reflect the language/AST.
1382 void emitAutoVarInit(const AutoVarEmission &emission);
1383 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1385
1386 void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
1387
1388 void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
1389 CXXCtorInitializer *baseInit);
1390
1392
1393 cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest);
1394
1395 mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
1396
1397 RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
1399
1400 /// Returns a Value corresponding to the size of the given expression by
1401 /// emitting a `cir.objsize` operation.
1402 ///
1403 /// \param e The expression whose object size to compute
1404 /// \param type Determines the semantics of the object size computation.
1405 /// The type parameter is a 2-bit value where:
1406 /// bit 0 (type & 1): 0 = whole object, 1 = closest subobject
1407 /// bit 1 (type & 2): 0 = maximum size, 2 = minimum size
1408 /// \param resType The result type for the size value
1409 /// \param emittedE Optional pre-emitted pointer value. If non-null, we'll
1410 /// call `cir.objsize` on this value rather than emitting e.
1411 /// \param isDynamic If true, allows runtime evaluation via dynamic mode
1412 mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type,
1413 cir::IntType resType, mlir::Value emittedE,
1414 bool isDynamic);
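  // Illustrative usage sketch (not part of the upstream header): the `type`
  // argument mirrors the second argument of __builtin_object_size. For
  //   size_t n = __builtin_object_size(p, 3);
  // both bits are set (closest subobject, minimum size), so the call would
  // look like the following, with `e`, `resTy`, and `cgf` assumed.
  //
  // \code
  //   mlir::Value size = cgf.emitBuiltinObjectSize(
  //       e, /*type=*/3, resTy, /*emittedE=*/nullptr, /*isDynamic=*/false);
  // \endcode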
1415
1416 mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e,
1417 unsigned type,
1418 cir::IntType resType,
1419 mlir::Value emittedE,
1420 bool isDynamic);
1421
1422 int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts);
1423
1425
1426 RValue emitCall(const CIRGenFunctionInfo &funcInfo,
1428 const CallArgList &args, cir::CIRCallOpInterface *callOp,
1429 mlir::Location loc);
1432 const CallArgList &args,
1433 cir::CIRCallOpInterface *callOrTryCall = nullptr) {
1434 assert(currSrcLoc && "source location must have been set");
1435 return emitCall(funcInfo, callee, returnValue, args, callOrTryCall,
1436 *currSrcLoc);
1437 }
1438
1439 RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
1441 void emitCallArg(CallArgList &args, const clang::Expr *e,
1442 clang::QualType argType);
1443 void emitCallArgs(
1444 CallArgList &args, PrototypeWrapper prototype,
1445 llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
1446 AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
1451
1452 template <typename T>
1453 mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
1454 mlir::ArrayAttr value,
1455 cir::CaseOpKind kind,
1456 bool buildingTopLevelCase);
1457
1458 mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
1459 mlir::Type condType,
1460 bool buildingTopLevelCase);
1461
1462 LValue emitCastLValue(const CastExpr *e);
1463
1464 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
1465 /// sanitizer is enabled, a runtime check is also emitted.
1466 mlir::Value emitCheckedArgForAssume(const Expr *e);
1467
1468 /// Emit a conversion from the specified complex type to the specified
1469 /// destination type, where the destination type is an LLVM scalar type.
1470 mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
1471 QualType dstTy, SourceLocation loc);
1472
1475
1477
1478 mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
1479 cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1480 cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1481 cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
1482 cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
1483 mlir::Value coroframeAddr);
1485
1486 void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
1487
1489
1490 mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
1491
1492 mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s);
1493
1495 AggValueSlot dest);
1496
1499 Address arrayBegin, const CXXConstructExpr *e,
1500 bool newPointerIsChecked,
1501 bool zeroInitialize = false);
1503 mlir::Value numElements, Address arrayBase,
1504 const CXXConstructExpr *e,
1505 bool newPointerIsChecked,
1506 bool zeroInitialize);
1508 clang::CXXCtorType type, bool forVirtualBase,
1509 bool delegating, AggValueSlot thisAVS,
1510 const clang::CXXConstructExpr *e);
1511
1513 clang::CXXCtorType type, bool forVirtualBase,
1514 bool delegating, Address thisAddr,
1516
1517 void emitCXXDeleteExpr(const CXXDeleteExpr *e);
1518
1520 bool forVirtualBase, bool delegating,
1521 Address thisAddr, QualType thisTy);
1522
1524 mlir::Value thisVal, QualType thisTy,
1525 mlir::Value implicitParam,
1526 QualType implicitParamTy, const CallExpr *e);
1527
1528 mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
1530
1533
1535 const Expr *e, Address base, mlir::Value memberPtr,
1536 const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo);
1537
1539 const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
1540 ReturnValueSlot returnValue, mlir::Value thisPtr,
1541 mlir::Value implicitParam, clang::QualType implicitParamTy,
1542 const clang::CallExpr *ce, CallArgList *rtlArgs);
1543
1545 const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
1546 ReturnValueSlot returnValue, bool hasQualifier,
1547 clang::NestedNameSpecifier qualifier, bool isArrow,
1548 const clang::Expr *base);
1549
1550 mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
1551
1552 void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
1553 mlir::Type elementTy, Address beginPtr,
1554 mlir::Value numElements,
1555 mlir::Value allocSizeWithoutCookie);
1556
1558 const CXXMethodDecl *md,
1560
1562
1564 const CallExpr *callExpr,
1566
1567 void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType,
1568 Address ptr);
1569
1570 void emitCXXThrowExpr(const CXXThrowExpr *e);
1571
1572 mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
1573
1574 mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s);
1575
1576 void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
1577 bool isFnTryBlock = false);
1578
1579 void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock = false);
1580
1582 clang::CXXCtorType ctorType, FunctionArgList &args);
1583
1584 // It's important not to confuse this and emitDelegateCXXConstructorCall.
1585 // Delegating constructors are the C++11 feature. The constructor delegate
1586 // optimization is used to reduce duplication in the base and complete
1587 // constructors where they are substantially the same.
1589 const FunctionArgList &args);
1590
1591 void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr,
1592 QualType deleteTy);
1593
1594 mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
1595
1596 mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
1597
1598 /// Emit an expression as an initializer for an object (variable, field, etc.)
1599 /// at the given location. The expression is not necessarily the normal
1600 /// initializer for the object, and the address is not necessarily
1601 /// its normal location.
1602 ///
1603 /// \param init the initializing expression
1604 /// \param d the object to act as if we're initializing
1605 /// \param lvalue the lvalue to initialize
1606 /// \param capturedByInit true if \p d is a __block variable whose address is
1607 /// potentially changed by the initializer
1608 void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
1609 LValue lvalue, bool capturedByInit = false);
1610
1611 mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
1612
1613 mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
1614
1615 mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s);
1616
1618
1620 clang::Expr *init);
1621
1623
1624 mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);
1625
1626 mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
1627
1628 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
1629
1630 void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty);
1631
1632 mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee,
1633 llvm::ArrayRef<mlir::Value> args = {});
1634
1635 void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc);
1636
1637 /// Emit the computation of the specified expression of scalar type.
1638 mlir::Value emitScalarExpr(const clang::Expr *e,
1639 bool ignoreResultAssign = false);
1640
1641 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
1642 cir::UnaryOpKind kind, bool isPre);
1643
1644 /// Build a debug stoppoint if we are emitting debug info.
1645 void emitStopPoint(const Stmt *s);
1646
1647 // Build CIR for a statement. useCurrentScope should be true if no
1648 // new scopes need be created when finding a compound statement.
1649 mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope,
1650 llvm::ArrayRef<const Attr *> attrs = {});
1651
1652 mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
1653 bool useCurrentScope);
1654
1655 mlir::LogicalResult emitForStmt(const clang::ForStmt &s);
1656
1657 void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator,
1658 CallArgList &callArgs);
1659
1660 RValue emitCoawaitExpr(const CoawaitExpr &e,
1661 AggValueSlot aggSlot = AggValueSlot::ignored(),
1662 bool ignoreResult = false);
1663 /// Emit the computation of the specified expression of complex type,
1664 /// returning the result.
1665 mlir::Value emitComplexExpr(const Expr *e);
1666
1667 void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit);
1668
1669 mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv,
1670 cir::UnaryOpKind op, bool isPre);
1671
1675 mlir::Value &result);
1676
1677 mlir::LogicalResult
1678 emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
1679 AggValueSlot slot = AggValueSlot::ignored());
1680
1681 mlir::LogicalResult
1683 Address *lastValue = nullptr,
1684 AggValueSlot slot = AggValueSlot::ignored());
1685
1686 void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
1687 mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
1688 LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
1689
1690 mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
1691 mlir::Type condType,
1692 bool buildingTopLevelCase);
1693
1695 clang::CXXCtorType ctorType,
1696 const FunctionArgList &args,
1698
1699 /// We are performing a delegate call; that is, the current function is
1700 /// delegating to another one. Produce an r-value suitable for passing the
1701 /// given parameter.
1702 void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
1704
1705 /// Emit an `if` on a boolean condition to the specified blocks.
1706 /// FIXME: Based on the condition, this might try to simplify the codegen of
1707 /// the conditional based on the branch.
1708 /// In the future, we may apply code generation simplifications here,
1709 /// similar to those used in classic LLVM codegen
1710 /// See `EmitBranchOnBoolExpr` for inspiration.
1711 mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
1712 const clang::Stmt *thenS,
1713 const clang::Stmt *elseS);
1714 cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
1715 BuilderCallbackRef thenBuilder,
1716 mlir::Location thenLoc,
1717 BuilderCallbackRef elseBuilder,
1718 std::optional<mlir::Location> elseLoc = {});
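  // Illustrative usage sketch (not part of the upstream header): emitting a
  // cir.if from builder callbacks. `cgf`, the condition expression `cond`, and
  // the locations `thenLoc`/`elseLoc` are assumed, and the example assumes
  // BuilderCallbackRef is callable as (mlir::OpBuilder &, mlir::Location).
  //
  // \code
  //   cir::IfOp ifOp = cgf.emitIfOnBoolExpr(
  //       cond,
  //       [&](mlir::OpBuilder &b, mlir::Location l) { /* emit then-body */ },
  //       thenLoc,
  //       [&](mlir::OpBuilder &b, mlir::Location l) { /* emit else-body */ },
  //       elseLoc);
  // \endcode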
1719
1720 mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
1721
1722 LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e);
1723
1724 mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
1725 mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
1726
1727 void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md);
1728 void emitLambdaStaticInvokeBody(const CXXMethodDecl *md);
1729
1730 void populateCatchHandlers(cir::TryOp tryOp);
1731
1732 mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
1733
1734 /// Emit code to compute the specified expression,
1735 /// ignoring the result.
1736 void emitIgnoredExpr(const clang::Expr *e);
1737
1738 RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc);
1739
1740 /// Load a complex number from the specified l-value.
1741 mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc);
1742
1743 RValue emitLoadOfExtVectorElementLValue(LValue lv);
1744
1745 /// Given an expression that represents a value lvalue, this method emits
1746 /// the address of the lvalue, then loads the result as an rvalue,
1747 /// returning the rvalue.
1748 RValue emitLoadOfLValue(LValue lv, SourceLocation loc);
1749
1750 Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
1751 LValueBaseInfo *pointeeBaseInfo);
1752 LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
1753 QualType refTy, AlignmentSource source);
1754
1755 /// EmitLoadOfScalar - Load a scalar value from an address, taking
1756 /// care to appropriately convert from the memory representation to
1757 /// the LLVM value representation. The l-value must be a simple
1758 /// l-value.
1759 mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);
1760 mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, QualType ty,
1761 SourceLocation loc, LValueBaseInfo baseInfo);
1762
1763 /// Emit code to compute a designator that specifies the location
1764 /// of the expression.
1765 /// FIXME: document this function better.
1766 LValue emitLValue(const clang::Expr *e);
1767 LValue emitLValueForBitField(LValue base, const FieldDecl *field);
1768 LValue emitLValueForField(LValue base, const clang::FieldDecl *field);
1769
1770 LValue emitLValueForLambdaField(const FieldDecl *field);
1771 LValue emitLValueForLambdaField(const FieldDecl *field,
1772 mlir::Value thisValue);
1773
1774 /// Like emitLValueForField, except that if the field is a reference, this
1775 /// will return the address of the reference and not the address of the value
1776 /// stored in the reference.
1777 LValue emitLValueForFieldInitialization(LValue base,
1778 const clang::FieldDecl *field,
1779 llvm::StringRef fieldName);
1780
1781 LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
1782
1783 LValue emitMemberExpr(const MemberExpr *e);
1784
1785 LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
1786
1787 LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
1788
1789 /// Given an expression with a pointer type, emit the value and compute our
1790 /// best estimate of the alignment of the pointee.
1791 ///
1792 /// One reasonable way to use this information is when there's a language
1793 /// guarantee that the pointer must be aligned to some stricter value, and
1794 /// we're simply trying to ensure that sufficiently obvious uses of under-
1795 /// aligned objects don't get miscompiled; for example, a placement new
1796 /// into the address of a local variable. In such a case, it's quite
1797 /// reasonable to just ignore the returned alignment when it isn't from an
1798 /// explicit source.
1799 Address emitPointerWithAlignment(const clang::Expr *expr,
1800 LValueBaseInfo *baseInfo = nullptr);
1801
1802 /// Emits a reference binding to the passed in expression.
1803 RValue emitReferenceBindingToExpr(const Expr *e);
1804
1805 mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);
1806
1807 RValue emitRotate(const CallExpr *e, bool isRotateLeft);
1808
1809 mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);
1810
1811 /// Emit a conversion from the specified type to the specified destination
1812 /// type, both of which are CIR scalar types.
1813 mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
1814 clang::QualType dstType,
1815 clang::SourceLocation loc);
1816
1817 void emitScalarInit(const clang::Expr *init, mlir::Location loc,
1818 LValue lvalue, bool capturedByInit = false);
1819
1820 mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx,
1821 const Expr *argExpr);
1822
1823 void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage);
1824
1825 void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest,
1826 bool isInit);
1827
1828 void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
1829 clang::QualType ty, LValueBaseInfo baseInfo,
1830 bool isInit = false, bool isNontemporal = false);
1831 void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
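Taken together, `emitLoadOfScalar` and `emitStoreOfScalar` above give the memory-to-value round trip for simple l-values. A minimal sketch of a scalar copy in those terms, assuming illustrative names `cgf`, `srcLV`, `dstLV`, and `loc`:

    mlir::Value v = cgf.emitLoadOfScalar(srcLV, loc);   // memory form -> value form
    cgf.emitStoreOfScalar(v, dstLV, /*isInit=*/false);  // value form -> memory, through the l-value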
1832
1833 /// Store the specified rvalue into the specified
1834 /// lvalue, where both are guaranteed to have the same type, and that type
1835 /// is 'Ty'.
1836 void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);
1837
1838 mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);
1839
1840 LValue emitStringLiteralLValue(const StringLiteral *e,
1841 llvm::StringRef name = ".str");
1842
1843 mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
1844 mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
1845 bool buildingTopLevelCase);
1846 mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
1847
1848 std::optional<mlir::Value>
1849 emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e,
1850 ReturnValueSlot &returnValue);
1851
1852 /// Given a value and its clang type, returns the value cast to its memory
1853 /// representation.
1854 /// Note: CIR defers most of the special casting to the final lowering passes
1855 /// to conserve the high level information.
1856 mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);
1857
1858 /// Emit a trap instruction, which is used to abort the program in an abnormal
1859 /// way, usually for debugging purposes.
1860 /// \p createNewBlock indicates whether to create a new block for the IR
1861 /// builder. Since the `cir.trap` operation is a terminator, operations that
1862 /// follow a trap cannot be emitted after `cir.trap` in the same block. To
1863 /// ensure these operations get emitted successfully, you need to create a new
1864 /// dummy block and set the insertion point there before continuing from the
1865 /// trap operation.
1866 void emitTrap(mlir::Location loc, bool createNewBlock);
1867
1868 LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
1869
1870 mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
1871
1872 /// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
1873 /// checking is enabled. Otherwise, just emit an unreachable instruction.
1874 /// \p createNewBlock indicates whether to create a new block for the IR
1875 /// builder. Since the `cir.unreachable` operation is a terminator, operations
1876 /// that follow an unreachable point cannot be emitted after `cir.unreachable`
1877 /// in the same block. To ensure these operations get emitted successfully,
1878 /// you need to create a dummy block and set the insertion point there before
1879 /// continuing from the unreachable point.
1880 void emitUnreachable(clang::SourceLocation loc, bool createNewBlock);
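Both `emitTrap` and `emitUnreachable` above emit a terminator, so the `createNewBlock` contract described in their comments amounts to the pattern sketched here; this is only an illustration of the idea, not a copy of the implementation, and it assumes the usual mlir::OpBuilder API on `builder`:

    builder.create<cir::TrapOp>(loc);          // terminator: this block can take no more ops
    mlir::Block *dummy =
        builder.createBlock(builder.getBlock()->getParent());
    builder.setInsertionPointToEnd(dummy);     // later emission lands in the dummy block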
1881
1882 /// This method handles emission of any variable declaration
1883 /// inside a function, including static vars etc.
1884 void emitVarDecl(const clang::VarDecl &d);
1885
1886 void emitVariablyModifiedType(QualType ty);
1887
1888 mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
1889
1890 std::optional<mlir::Value> emitX86BuiltinExpr(unsigned builtinID,
1891 const CallExpr *expr);
1892
1893 /// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
1894 /// nonnull, if \p lhs is marked _Nonnull.
1895 void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
1896 clang::SourceLocation loc);
1897
1898 /// An object to manage conditionally-evaluated expressions.
1899 class ConditionalEvaluation {
1900 CIRGenFunction &cgf;
1901 mlir::OpBuilder::InsertPoint insertPt;
1902
1903 public:
1904 ConditionalEvaluation(CIRGenFunction &cgf)
1905 : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {}
1906 ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
1907 : cgf(cgf), insertPt(ip) {}
1908
1910 assert(cgf.outermostConditional != this);
1911 if (!cgf.outermostConditional)
1912 cgf.outermostConditional = this;
1913 }
1914
1916 assert(cgf.outermostConditional != nullptr);
1917 if (cgf.outermostConditional == this)
1918 cgf.outermostConditional = nullptr;
1919 }
1920
1921 /// Returns the insertion point which will be executed prior to each
1922 /// evaluation of the conditional code. In LLVM OG, this method
1923 /// is called getStartingBlock.
1924 mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; }
1925 };
1926
1927 struct ConditionalInfo {
1928 std::optional<LValue> lhs{}, rhs{};
1929 mlir::Value result{};
1930 };
1931
1932 // Return true if we're currently emitting one branch or the other of a
1933 // conditional expression.
1934 bool isInConditionalBranch() const { return outermostConditional != nullptr; }
1935
1936 void setBeforeOutermostConditional(mlir::Value value, Address addr) {
1937 assert(isInConditionalBranch());
1938 {
1939 mlir::OpBuilder::InsertionGuard guard(builder);
1940 builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
1941 builder.createStore(
1942 value.getLoc(), value, addr, /*isVolatile=*/false,
1943 mlir::IntegerAttr::get(
1944 mlir::IntegerType::get(value.getContext(), 64),
1945 (uint64_t)addr.getAlignment().getAsAlign().value()));
1946 }
1947 }
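A sketch of how the two members above are meant to be used when a temporary created inside one arm of a conditional needs an initialization that dominates the whole conditional; `flag` and `addr` are illustrative assumptions, and the non-conditional branch simply mirrors the store issued in `setBeforeOutermostConditional`:

    if (isInConditionalBranch())
      setBeforeOutermostConditional(flag, addr);  // store hoisted to the saved insertion point
    else
      builder.createStore(flag.getLoc(), flag, addr, /*isVolatile=*/false,
                          mlir::IntegerAttr::get(
                              mlir::IntegerType::get(flag.getContext(), 64),
                              (uint64_t)addr.getAlignment().getAsAlign().value()));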
1948
1949 // Points to the outermost active conditional control. This is used so that
1950 // we know if a temporary should be destroyed conditionally.
1951 ConditionalEvaluation *outermostConditional = nullptr;
1952
1953 /// An RAII object to record that we're evaluating a statement
1954 /// expression.
1955 class StmtExprEvaluation {
1956 CIRGenFunction &cgf;
1957
1958 /// We have to save the outermost conditional: cleanups in a
1959 /// statement expression aren't conditional just because the
1960 /// StmtExpr is.
1961 ConditionalEvaluation *savedOutermostConditional;
1962
1963 public:
1964 StmtExprEvaluation(CIRGenFunction &cgf)
1965 : cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
1966 cgf.outermostConditional = nullptr;
1967 }
1968
1969 ~StmtExprEvaluation() {
1970 cgf.outermostConditional = savedOutermostConditional;
1971 }
1972 };
1973
1974 template <typename FuncTy>
1975 ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
1976 const FuncTy &branchGenFunc);
1977
1978 mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
1979 const clang::Stmt *thenS,
1980 const clang::Stmt *elseS);
1981
1982 /// Build a "reference" to a va_list; this is either the address or the value
1983 /// of the expression, depending on how va_list is defined.
1984 Address emitVAListRef(const Expr *e);
1985
1986 /// Emits the start of a CIR variable-argument operation (`cir.va_start`)
1987 ///
1988 /// \param vaList A reference to the \c va_list as emitted by either
1989 /// \c emitVAListRef or \c emitMSVAListRef.
1990 ///
1991 /// \param count The number of arguments in \c vaList
1992 void emitVAStart(mlir::Value vaList, mlir::Value count);
1993
1994 /// Emits the end of a CIR variable-argument operation (`cir.va_end`)
1995 ///
1996 /// \param vaList A reference to the \c va_list as emitted by either
1997 /// \c emitVAListRef or \c emitMSVAListRef.
1998 void emitVAEnd(mlir::Value vaList);
1999
2000 /// Generate code to get an argument from the passed in pointer
2001 /// and update it accordingly.
2002 ///
2003 /// \param ve The \c VAArgExpr for which to generate code.
2004 ///
2005 /// \param vaListAddr Receives a reference to the \c va_list as emitted by
2006 /// either \c emitVAListRef or \c emitMSVAListRef.
2007 ///
2008 /// \returns SSA value with the argument.
2009 mlir::Value emitVAArg(VAArgExpr *ve);
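A sketch of how the va_list helpers above line up with the source-level builtins; `cgf`, `vaListExpr`, `vaArgExpr`, and `count` are illustrative assumptions (in particular, `count` must already be an SSA value), not names taken from this header:

    mlir::Value list = cgf.emitVAListRef(vaListExpr).getPointer();
    cgf.emitVAStart(list, count);               // __builtin_va_start -> cir.va_start
    mlir::Value arg = cgf.emitVAArg(vaArgExpr); // __builtin_va_arg   -> SSA value of the argument
    cgf.emitVAEnd(list);                        // __builtin_va_end   -> cir.va_end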
2010
2011 /// ----------------------
2012 /// CIR build helpers
2013 /// -----------------
2014public:
2015 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2016 const Twine &name = "tmp",
2017 mlir::Value arraySize = nullptr,
2018 bool insertIntoFnEntryBlock = false);
2019 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2020 const Twine &name = "tmp",
2021 mlir::OpBuilder::InsertPoint ip = {},
2022 mlir::Value arraySize = nullptr);
2023 Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
2024 const Twine &name = "tmp",
2025 mlir::Value arraySize = nullptr,
2026 Address *alloca = nullptr,
2027 mlir::OpBuilder::InsertPoint ip = {});
2028 Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
2029 mlir::Location loc,
2030 const Twine &name = "tmp",
2031 mlir::Value arraySize = nullptr,
2032 mlir::OpBuilder::InsertPoint ip = {});
2033
2034 /// Create a temporary memory object of the given type, with
2035 /// appropriate alignment and cast it to the default address space. Returns
2036 /// the original alloca instruction by \p Alloca if it is not nullptr.
2037 Address createMemTemp(QualType t, mlir::Location loc,
2038 const Twine &name = "tmp", Address *alloca = nullptr,
2039 mlir::OpBuilder::InsertPoint ip = {});
2040 Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
2041 const Twine &name = "tmp", Address *alloca = nullptr,
2042 mlir::OpBuilder::InsertPoint ip = {});
2043
2044 //===--------------------------------------------------------------------===//
2045 // OpenMP Emission
2046 //===--------------------------------------------------------------------===//
2047public:
2048 mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s);
2049 mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s);
2050 mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s);
2051 mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s);
2052 mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s);
2053 mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s);
2054 mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s);
2055 mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s);
2056 mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s);
2057 mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s);
2058 mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s);
2059 mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s);
2060 mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s);
2061 mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s);
2062 mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s);
2063 mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s);
2064 mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s);
2065 mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s);
2066 mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s);
2067 mlir::LogicalResult
2068 emitOMPParallelForDirective(const OMPParallelForDirective &s);
2069 mlir::LogicalResult
2070 emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s);
2071 mlir::LogicalResult
2072 emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s);
2073 mlir::LogicalResult
2074 emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s);
2075 mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s);
2076 mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s);
2077 mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s);
2078 mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s);
2079 mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s);
2080 mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s);
2081 mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s);
2082 mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s);
2083 mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s);
2084 mlir::LogicalResult
2085 emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s);
2086 mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s);
2087 mlir::LogicalResult
2088 emitOMPTargetDataDirective(const OMPTargetDataDirective &s);
2089 mlir::LogicalResult
2090 emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s);
2091 mlir::LogicalResult
2092 emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s);
2093 mlir::LogicalResult
2094 emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s);
2095 mlir::LogicalResult
2096 emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s);
2097 mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s);
2098 mlir::LogicalResult
2099 emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s);
2100 mlir::LogicalResult
2101 emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s);
2102 mlir::LogicalResult
2103 emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s);
2104 mlir::LogicalResult
2105 emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s);
2106 mlir::LogicalResult
2107 emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s);
2108 mlir::LogicalResult
2109 emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s);
2110 mlir::LogicalResult
2111 emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s);
2112 mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(
2113 const OMPParallelMaskedTaskLoopDirective &s);
2114 mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(
2115 const OMPParallelMaskedTaskLoopSimdDirective &s);
2116 mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(
2117 const OMPParallelMasterTaskLoopDirective &s);
2118 mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(
2119 const OMPParallelMasterTaskLoopSimdDirective &s);
2120 mlir::LogicalResult
2121 emitOMPDistributeDirective(const OMPDistributeDirective &s);
2122 mlir::LogicalResult emitOMPDistributeParallelForDirective(
2123 const OMPDistributeParallelForDirective &s);
2124 mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(
2125 const OMPDistributeParallelForSimdDirective &s);
2126 mlir::LogicalResult
2127 emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s);
2128 mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(
2129 const OMPTargetParallelGenericLoopDirective &s);
2130 mlir::LogicalResult emitOMPTargetParallelForSimdDirective(
2131 const OMPTargetParallelForSimdDirective &s);
2132 mlir::LogicalResult
2133 emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s);
2134 mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(
2135 const OMPTargetTeamsGenericLoopDirective &s);
2136 mlir::LogicalResult
2137 emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s);
2138 mlir::LogicalResult
2139 emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s);
2140 mlir::LogicalResult
2141 emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s);
2142 mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(
2143 const OMPTeamsDistributeParallelForSimdDirective &s);
2144 mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(
2145 const OMPTeamsDistributeParallelForDirective &s);
2146 mlir::LogicalResult
2147 emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s);
2148 mlir::LogicalResult
2149 emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s);
2150 mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(
2151 const OMPTargetTeamsDistributeDirective &s);
2152 mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(
2153 const OMPTargetTeamsDistributeParallelForDirective &s);
2154 mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(
2155 const OMPTargetTeamsDistributeParallelForSimdDirective &s);
2156 mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(
2157 const OMPTargetTeamsDistributeSimdDirective &s);
2158 mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s);
2159 mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s);
2160 mlir::LogicalResult
2161 emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s);
2162 mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s);
2163 mlir::LogicalResult
2164 emitOMPInterchangeDirective(const OMPInterchangeDirective &s);
2165 mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s);
2166 mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s);
2167 mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s);
2168
2169 void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d);
2170 void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d);
2171 void emitOMPCapturedExpr(const OMPCapturedExprDecl &d);
2172 void emitOMPAllocateDecl(const OMPAllocateDecl &d);
2173 void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d);
2174 void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d);
2175 void emitOMPRequiresDecl(const OMPRequiresDecl &d);
2176
2177private:
2178 template <typename Op>
2179 void emitOpenMPClauses(Op &op, ArrayRef<const OMPClause *> clauses);
2180
2181 //===--------------------------------------------------------------------===//
2182 // OpenACC Emission
2183 //===--------------------------------------------------------------------===//
2184private:
2185 template <typename Op>
2186 Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind,
2187 llvm::ArrayRef<const OpenACCClause *> clauses);
2188 // Function to do the basic implementation of an operation with an Associated
2189 // Statement. Models AssociatedStmtConstruct.
2190 template <typename Op, typename TermOp>
2191 mlir::LogicalResult
2192 emitOpenACCOpAssociatedStmt(mlir::Location start, mlir::Location end,
2193 OpenACCDirectiveKind dirKind,
2194 llvm::ArrayRef<const OpenACCClause *> clauses,
2195 const Stmt *associatedStmt);
2196
2197 template <typename Op, typename TermOp>
2198 mlir::LogicalResult emitOpenACCOpCombinedConstruct(
2199 mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
2200 llvm::ArrayRef<const OpenACCClause *> clauses, const Stmt *loopStmt);
2201
2202 template <typename Op>
2203 void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind,
2204 ArrayRef<const OpenACCClause *> clauses);
2205 // The second template argument doesn't need to be a template, since it should
2206 // always be an mlir::acc::LoopOp, but since this is a template anyway, making
2207 // it a template argument lets us avoid including the OpenACC MLIR
2208 // headers here. We will count on linker failures/explicit instantiation to
2209 // ensure we don't mess this up, but it is only called from 1 place, and
2210 // instantiated 3x.
2211 template <typename ComputeOp, typename LoopOp>
2212 void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp,
2213 OpenACCDirectiveKind dirKind,
2214 ArrayRef<const OpenACCClause *> clauses);
2215
2216 // The OpenACC LoopOp requires that we have auto, seq, or independent on all
2217 // LoopOp operations for the 'none' device type case. This function checks if
2218 // the LoopOp has one, else it updates it to have one.
2219 void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan,
2221
2222 // The OpenACC 'cache' construct actually applies to the 'loop' if present. So
2223 // keep track of the 'loop' so that we can add the cache vars to it correctly.
2224 mlir::acc::LoopOp *activeLoopOp = nullptr;
2225
2226 struct ActiveOpenACCLoopRAII {
2227 CIRGenFunction &cgf;
2228 mlir::acc::LoopOp *oldLoopOp;
2229
2230 ActiveOpenACCLoopRAII(CIRGenFunction &cgf, mlir::acc::LoopOp *newOp)
2231 : cgf(cgf), oldLoopOp(cgf.activeLoopOp) {
2232 cgf.activeLoopOp = newOp;
2233 }
2234 ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; }
2235 };
2236
2237 // Keep track of the last place we inserted a 'recipe' so that we can insert
2238 // the next one in lexical order.
2239 mlir::OpBuilder::InsertPoint lastRecipeLocation;
2240
2241public:
2242 // Helper type used to store the list of important information for a 'data'
2243 // clause variable, or a 'cache' variable reference.
2244 struct OpenACCDataOperandInfo {
2245 mlir::Location beginLoc;
2246 mlir::Value varValue;
2247 std::string name;
2248 // The type of the original variable reference: that is, after 'bounds' have
2249 // removed pointers/array types/etc. So in the case of int arr[5], and a
2250 // private(arr[1]), 'origType' is 'int', but 'baseType' is 'int[5]'.
2251 QualType origType;
2252 QualType baseType;
2254 // The list of types that we found when going through the bounds, which we
2255 // can use to properly set the alloca section.
2257 };
2258
2259 // Gets the collection of info required to lower an OpenACC clause or cache
2260 // construct variable reference.
2261 OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e);
2262 // Helper function to emit the integer expressions as required by an OpenACC
2263 // clause/construct.
2264 mlir::Value emitOpenACCIntExpr(const Expr *intExpr);
2265 // Helper function to emit an integer constant as an mlir int type, used for
2266 // constants in OpenACC constructs/clauses.
2267 mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width,
2268 int64_t value);
2269
2270 mlir::LogicalResult
2271 emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s);
2272 mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
2273 mlir::LogicalResult
2274 emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s);
2275 mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
2276 mlir::LogicalResult
2277 emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s);
2278 mlir::LogicalResult
2279 emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s);
2280 mlir::LogicalResult
2281 emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s);
2282 mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
2283 mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
2284 mlir::LogicalResult
2285 emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s);
2286 mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
2287 mlir::LogicalResult
2288 emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s);
2289 mlir::LogicalResult
2290 emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s);
2291 mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);
2292
2293 void emitOpenACCDeclare(const OpenACCDeclareDecl &d);
2294 void emitOpenACCRoutine(const OpenACCRoutineDecl &d);
2295
2296 /// Create a temporary memory object for the given aggregate type.
2297 AggValueSlot createAggTemp(QualType ty, mlir::Location loc,
2298 const Twine &name = "tmp",
2299 Address *alloca = nullptr) {
2301 return AggValueSlot::forAddr(
2302 createMemTemp(ty, loc, name, alloca), ty.getQualifiers(),
2303 AggValueSlot::IsNotDestructed, AggValueSlot::IsNotAliased,
2304 AggValueSlot::DoesNotOverlap);
2305 }
2306
2307private:
2308 QualType getVarArgType(const Expr *arg);
2309};
2310
2311} // namespace clang::CIRGen
2312
2313#endif
Defines the clang::ASTContext interface.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, Address ptr, Address val1, Address val2, Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size, cir::MemOrder order, cir::SyncScopeKind scope)
Defines the clang::Expr interface and subclasses for C++ expressions.
Defines an enumeration for C++ overloaded operators.
C Language Family Type Representation.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
Represents a member of a struct/union/class.
Definition Decl.h:3160
This class represents a 'loop' construct. The 'loop' construct applies to a 'for' loop (or range-for ...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4353
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3723
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3268
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition Expr.h:6880
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition Expr.h:4453
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition Expr.h:4491
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition Expr.h:4488
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
BreakStmt - This represents a break.
Definition Stmt.h:3126
mlir::Value getPointer() const
Definition Address.h:90
static Address invalid()
Definition Address.h:69
clang::CharUnits getAlignment() const
Definition Address.h:130
mlir::Value getBasePointer() const
Definition Address.h:95
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
AbstractCallee(const clang::FunctionDecl *fd)
const clang::ParmVarDecl * getParamDecl(unsigned I) const
CXXDefaultInitExprScope(CIRGenFunction &cgf, const CXXDefaultInitExpr *e)
An object to manage conditionally-evaluated expressions.
ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
mlir::OpBuilder::InsertPoint getInsertPoint() const
Returns the insertion point which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forReference(mlir::TypedAttr c)
static ConstantEmission forValue(mlir::TypedAttr c)
LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const
DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr)
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, LValue lvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
RunCleanupsScope(CIRGenFunction &cgf)
Enter a new cleanup scope.
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void restore()
Can be used to restore the state early, before the dtor is run.
SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value)
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
static bool hasScalarEvaluationKind(clang::QualType type)
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
void emitOpenACCRoutine(const OpenACCRoutineDecl &d)
void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d, cir::GlobalOp gv, cir::GetGlobalOp gvAddr)
Add the initializer for 'd' to the global variable that has already been created for it.
mlir::LogicalResult emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard SourceLocExprScopeGuard
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d, mlir::OpBuilder::InsertPoint ip={})
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, clang::QualType::DestructionKind dtorKind)
Enter a destroy cleanup for the given local variable.
ImplicitParamDecl * cxxabiThisDecl
CXXThisDecl - When generating code for a C++ member function, this will hold the implicit 'this' decl...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
mlir::LogicalResult emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
mlir::LogicalResult emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &s)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
llvm::ScopedHashTable< const clang::Decl *, mlir::Value > SymTableTy
The symbol table maps a variable name to a value in the current scope.
void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc)
Definition CIRGenCXX.cpp:32
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s)
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator, CallArgList &callArgs)
mlir::Block * getCurFunctionEntryBlock()
void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp, bool isFnTryBlock=false)
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
void emitOMPRequiresDecl(const OMPRequiresDecl &d)
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::LogicalResult emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s)
void populateEHCatchRegions(EHScopeStack::stable_iterator scope, cir::TryOp tryOp)
Address cxxDefaultInitExprThis
The value of 'this' to sue when evaluating CXXDefaultInitExprs within this expression.
void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &s)
void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock=false)
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s)
void setBeforeOutermostConditional(mlir::Value value, Address addr)
mlir::LogicalResult emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s)
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::Value loadCXXThis()
Load the value for 'this'.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-vlaue withi the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
llvm::DenseMap< const clang::Decl *, Address > DeclMapTy
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
const clang::Decl * curFuncDecl
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
LValue emitLValueForLambdaField(const FieldDecl *field)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOrTryCall=nullptr)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
bool isTrivialInitializer(const Expr *init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
void emitOpenACCDeclare(const OpenACCDeclareDecl &d)
Address getAddrOfLocalVar(const clang::VarDecl *vd)
Return the address of a local variable.
void emitAnyExprToExn(const Expr *e, Address addr)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
Address getAsNaturalAddressOf(Address addr, QualType pointeeTy)
JumpDest returnBlock(mlir::Block *retBlock)
Unified return block.
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s)
mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase, bool delegating)
Return the VTT parameter that should be passed to a base constructor/destructor with virtual bases.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s)
void initializeVTablePointers(mlir::Location loc, const clang::CXXRecordDecl *rd)
mlir::Type convertType(const TypeDecl *t)
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
void initializeVTablePointer(mlir::Location loc, const VPtr &vptr)
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d)
void emitAggregateStore(mlir::Value value, Address dest)
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
ConditionalEvaluation * outermostConditional
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &s)
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl, CXXCtorInitializer *baseInit)
RValue emitAtomicExpr(AtomicExpr *e)
void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d, LValue lvalue, bool capturedByInit=false)
Emit an expression as an initializer for an object (variable, field, etc.) at the given location.
mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s)
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s)
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
void emitVAStart(mlir::Value vaList, mlir::Value count)
Emits the start of a CIR variable-argument operation (cir.va_start)
mlir::LogicalResult emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s)
VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass)
const TargetCIRGenInfo & getTargetHooks() const
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s)
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s)
JumpDest getJumpDestInCurrentScope(mlir::Block *target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e)
mlir::LogicalResult emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s)
CleanupKind getCleanupKind(QualType::DestructionKind kind)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s)
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
Definition CIRGenAsm.cpp:86
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::LogicalResult emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s)
void emitOMPAllocateDecl(const OMPAllocateDecl &d)
mlir::LogicalResult emitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &s)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType)
ImplicitParamDecl * cxxStructorImplicitParamDecl
When generating code for a constructor or destructor, this will hold the implicit argument (e....
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d)
mlir::LogicalResult emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType)
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
void populateCatchHandlersIfRequired(cir::TryOp tryOp)
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s)
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s)
mlir::LogicalResult emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s)
mlir::LogicalResult emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
static int64_t getZExtIntValueFromConstOp(mlir::Value val)
Get zero-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s)
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &s)
mlir::LogicalResult emitOMPInterchangeDirective(const OMPInterchangeDirective &s)
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s)
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
void populateUnwindResumeBlock(bool isCleanup, cir::TryOp tryOp)
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s)
LValue emitAggExprToLValue(const Expr *e)
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
clang::CurrentSourceLocExprScope curSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
mlir::Value loadCXXVTT()
Load the VTT parameter to base constructors/destructors have virtual bases.
void emitVarDecl(const clang::VarDecl &d)
This method handles emission of any variable declaration inside a function, including static vars etc...
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo)
static int64_t getSExtIntValueFromConstOp(mlir::Value val)
Get integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPTargetDataDirective(const OMPTargetDataDirective &s)
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &s)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr, const clang::CXXRecordDecl *vtableClass)
Return the Value of the vtable pointer member pointed to by thisAddr.
void emitArrayDestroy(mlir::Value begin, mlir::Value numElements, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Destroys all the elements of the given array, beginning from last to first.
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::LogicalResult emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s)
RValue emitAnyExprToTemp(const clang::Expr *e)
Similarly to emitAnyExpr(), however, the result will always be accessible even if no aggregate locati...
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s)
mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS)
void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
llvm::SmallPtrSet< const clang::CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
bool hasVolatileMember(QualType t)
returns true if aggregate type has a volatile member.
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s)
void emitCallArg(CallArgList &args, const clang::Expr *e, clang::QualType argType)
clang::FieldDecl * lambdaThisCaptureField
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s)
void emitConstructorBody(FunctionArgList &args)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, excpet that if the Field is a reference, this will return the address of the...
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &s)
LValue emitCallExprLValue(const clang::CallExpr *e)
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s)
bool haveInsertPoint() const
True if an insertion point is defined.
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::LogicalResult emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s)
void emitAutoVarInit(const AutoVarEmission &emission)
Emit the initializer for an allocated variable.
void emitInitializerForField(clang::FieldDecl *field, LValue lhs, clang::Expr *init)
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::LogicalResult emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void maybeEmitDeferredVarDeclInit(const VarDecl *vd)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_start)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
std::optional< mlir::Value > emitAArch64SMEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
void pushStackRestore(CleanupKind kind, Address spMem)
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s)
void emitAutoVarDecl(const clang::VarDecl &d)
Emit code and set up symbol table for a variable declaration with auto, register, or no storage class...
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s)
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest)
Build a unconditional branch to the lexical scope cleanup block or with the labeled blocked if alread...
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s)
mlir::LogicalResult emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s)
CIRGenBuilderTy & getBuilder()
bool didCallStackSave
Whether a cir.stacksave operation has been added.
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
mlir::Value emitOpenACCIntExpr(const Expr *intExpr)
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitDestroy(Address addr, QualType type, Destroyer *destroyer)
Immediately perform the destruction of the given object.
const CIRGenModule & getCIRGenModule() const
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
llvm::DenseMap< const VarDecl *, mlir::Value > nrvoFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
Destroyer * getDestroyer(clang::QualType::DestructionKind kind)
void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty)
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
void emitAtomicInit(Expr *init, LValue dest)
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
mlir::LogicalResult emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
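A sketch of the typical reason to ask (hypothetical helper, mirroring the classic constant-folded-branch check): a branch body containing a label cannot simply be dropped even when its condition folds away.
  #include "CIRGenFunction.h" // clang/lib/CIR/CodeGen internal header
  // Hypothetical helper: true when an unreachable branch body may be skipped.
  static bool canSkipDeadBranch(clang::CIRGen::CIRGenFunction &cgf,
                                const clang::Stmt *body) {
    return !cgf.containsLabel(body);
  }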
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width, int64_t value)
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d)
mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee, llvm::ArrayRef< mlir::Value > args={})
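A minimal call-site sketch for the entry above (hypothetical helper; `callee` is assumed to be a runtime cir::FuncOp created elsewhere):
  #include "CIRGenFunction.h" // clang/lib/CIR/CodeGen internal header
  // Hypothetical helper: call a pre-declared runtime function with one argument.
  static mlir::Value callRuntimeHelper(clang::CIRGen::CIRGenFunction &cgf,
                                       mlir::Location loc, cir::FuncOp callee,
                                       mlir::Value arg) {
    return cgf.emitRuntimeCall(loc, callee, {arg});
  }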
void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::LogicalResult emitOMPParallelForDirective(const OMPParallelForDirective &s)
RValue emitCoawaitExpr(const CoawaitExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
void emitCXXThrowExpr(const CXXThrowExpr *e)
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
mlir::LogicalResult emitOMPDistributeDirective(const OMPDistributeDirective &s)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
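A minimal sketch (hypothetical helper): emit an expression regardless of its evaluation kind; aggregate results go to the default ignored slot unless a real one is supplied.
  #include "CIRGenFunction.h" // clang/lib/CIR/CodeGen internal header
  static clang::CIRGen::RValue emitValueOf(clang::CIRGen::CIRGenFunction &cgf,
                                           const clang::Expr *e) {
    return cgf.emitAnyExpr(e); // aggSlot and ignoreResult keep their defaults
  }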
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &s)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &s)
llvm::SmallVector< VPtr, 4 > VPtrsVector
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
bool sawAsmBlock
Whether or not a Microsoft-style asm block has been processed within this function.
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s)
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
EHScopeStack::stable_iterator currentCleanupStackDepth
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
void pushFullExprCleanup(CleanupKind kind, As... a)
Push a cleanup to be run at the end of the current full-expression.
void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param, clang::SourceLocation loc)
We are performing a delegate call; that is, the current function is delegating to another one.
mlir::LogicalResult emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
void populateCatchHandlers(cir::TryOp tryOp)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if lhs is marked _Nonnull...
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
std::optional< mlir::Value > emitAArch64SVEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr)
void emitOMPCapturedExpr(const OMPCapturedExprDecl &d)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to have the same ...
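A hedged sketch combining this with makeAddrLValue from earlier in this list (hypothetical helper; assumes RValue::get wraps a scalar mlir::Value, as the analogous CodeGen class does):
  #include "CIRGenFunction.h" // clang/lib/CIR/CodeGen internal header
  // Hypothetical helper: store scalar `v` into `addr`, viewed as an object of
  // type `ty`, as an initializing store.
  static void storeScalarInto(clang::CIRGen::CIRGenFunction &cgf,
                              clang::CIRGen::Address addr, clang::QualType ty,
                              mlir::Value v) {
    clang::CIRGen::LValue lv = cgf.makeAddrLValue(addr, ty);
    cgf.emitStoreThroughLValue(clang::CIRGen::RValue::get(v), lv, /*isInit=*/true);
  }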
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &s)
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth)
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s)
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates an alloca and inserts it into the entry block of the current region.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &s)
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s)
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment, and cast it to the defa...
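A minimal sketch for the entry above (hypothetical helper and temporary name):
  #include "CIRGenFunction.h" // clang/lib/CIR/CodeGen internal header
  // Hypothetical helper: an aligned temporary for type `t`, named in the CIR output.
  static clang::CIRGen::Address makeScratchTemp(clang::CIRGen::CIRGenFunction &cgf,
                                                clang::QualType t,
                                                mlir::Location loc) {
    return cgf.createMemTemp(t, loc, /*name=*/"scratch.tmp");
  }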
void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor, const FunctionArgList &args)
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, mlir::OpBuilder::InsertPoint ip, mlir::Value arraySize=nullptr)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s)
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s)
void popCleanupBlock()
Pops a cleanup block.
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
void emitAutoVarCleanups(const AutoVarEmission &emission)
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:48
A saved depth on the scope stack.
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:191
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:254
Represents a call to a C++ constructor.
Definition ExprCXX.h:1548
Represents a C++ constructor within a class.
Definition DeclCXX.h:2604
Represents a C++ base or member initializer.
Definition DeclCXX.h:2369
A default argument (C++ [dcl.fct.default]).
Definition ExprCXX.h:1270
A use of a default initializer in a constructor or in aggregate initialization.
Definition ExprCXX.h:1377
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2626
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:481
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:179
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2129
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2355
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:84
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2745
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
Represents a C++ temporary.
Definition ExprCXX.h:1459
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1208
CXXTryStmt - A C++ try block, including all handlers.
Definition StmtCXX.h:69
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2943
CaseStmt - Represent a case statement.
Definition Stmt.h:1911
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3676
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
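Because CharUnits is a public clang API, a genuinely standalone illustration of the two members above is possible (function name made up):
  #include "clang/AST/CharUnits.h"
  #include "llvm/Support/Alignment.h"
  #include <cassert>
  llvm::Align eightByteAlign() {
    clang::CharUnits eight = clang::CharUnits::fromQuantity(8);
    assert(!eight.isZero() && "non-zero by construction");
    return eight.getAsAlign(); // valid: 8 is a power of two
  }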
Represents a 'co_await' expression.
Definition ExprCXX.h:5369
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4300
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3605
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1731
ContinueStmt - This represents a continue.
Definition Stmt.h:3110
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition StmtCXX.h:473
Represents the body of a coroutine.
Definition StmtCXX.h:320
SourceLocExprScopeGuard(const Expr *DefaultExpr, CurrentSourceLocExprScope &Current)
Represents the current source location and context used to determine the value of the source location...
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1622
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2823
This represents one expression.
Definition Expr.h:112
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6564
Represents a member of a struct/union/class.
Definition Decl.h:3160
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2879
Represents a function declaration or definition.
Definition Decl.h:2000
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5269
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2960
IfStmt - This represents an if/then/else.
Definition Stmt.h:2250
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:2999
Represents the declaration of a label.
Definition Decl.h:524
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2137
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3364
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3654
Represents a C++ nested name specifier, such as "::std::vector<int>::".
ObjCMethodDecl - Represents an instance or class method declaration.
Definition DeclObjC.h:140
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
Represents a parameter to a function.
Definition Decl.h:1790
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2005
A (possibly-)qualified type.
Definition TypeBase.h:937
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8332
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3151
Encodes a location in the source.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:85
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2500
Exposes information about the current target.
Definition TargetInfo.h:226
Represents a declaration of a type.
Definition Decl.h:3513
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4957
Represents the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition Decl.h:712
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3967
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2688
#define bool
Definition gpuintrin.h:32
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
const internal::VariadicDynCastAllOfMatcher< Stmt, CallExpr > callExpr
Matches call expressions.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicAllOfMatcher< Stmt > stmt
Matches statements.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
OpenACCDirectiveKind
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
OpenACCComputeConstruct(OpenACCDirectiveKind K, SourceLocation Start, SourceLocation DirectiveLoc, SourceLocation End, ArrayRef< const OpenACCClause * > Clauses, Stmt *StructuredBlock)
const FunctionProtoType * T
CXXDtorType
C++ destructor types.
Definition ABI.h:34
#define true
Definition stdbool.h:25
static bool aggValueSlot()
static bool peepholeProtection()
static bool opAllocaEscapeByReference()
static bool generateDebugInfo()
AutoVarEmission(const clang::VarDecl &variable)
bool isEscapingByRef
True if the variable is a __block variable that is captured by an escaping block.
Address addr
The address of the alloca for languages with explicit address space (e.g.
bool emittedAsOffload
True if the variable was emitted as an offload recipe, and thus doesn't have the same sort of alloca ...
bool isConstantAggregate
True if the variable is of aggregate type and has a constant initializer.
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
Address getObjectAddress(CIRGenFunction &cgf) const
Returns the address of the object within this declaration.
std::unique_ptr< CGCoroData > data
CXXDefaultArgExprScope(CIRGenFunction &cfg, const CXXDefaultArgExpr *e)
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
mlir::Block * createCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc)
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
void updateRetLoc(mlir::Block *b, mlir::Location loc)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateCleanupBlock(mlir::OpBuilder &builder)
mlir::Location getRetLoc(mlir::Block *b)
llvm::PointerUnion< const clang::FunctionProtoType *, const clang::ObjCMethodDecl * > p
PrototypeWrapper(const clang::ObjCMethodDecl *md)
PrototypeWrapper(const clang::FunctionProtoType *ft)
const clang::CXXRecordDecl * vtableClass
const clang::CXXRecordDecl * nearestVBase
VlaSizePair(mlir::Value num, QualType ty)