CIRGenFunction.h
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
14#define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
15
16#include "CIRGenBuilder.h"
17#include "CIRGenCall.h"
18#include "CIRGenModule.h"
19#include "CIRGenTypeCache.h"
20#include "CIRGenValue.h"
21#include "EHScopeStack.h"
22
23#include "Address.h"
24
27#include "clang/AST/CharUnits.h"
29#include "clang/AST/Decl.h"
30#include "clang/AST/ExprCXX.h"
31#include "clang/AST/Stmt.h"
32#include "clang/AST/Type.h"
38#include "llvm/ADT/ScopedHashTable.h"
39
40namespace {
41class ScalarExprEmitter;
42} // namespace
43
44namespace mlir {
45namespace acc {
46class LoopOp;
47} // namespace acc
48} // namespace mlir
49
50namespace clang::CIRGen {
51
52struct CGCoroData;
53
54class CIRGenFunction : public CIRGenTypeCache {
55public:
56 CIRGenModule &cgm;
57
58private:
59 friend class ::ScalarExprEmitter;
60 /// The builder is a helper class to create IR inside a function. The
61 /// builder is stateful, in particular it keeps an "insertion point": this
62 /// is where the next operations will be introduced.
63 CIRGenBuilderTy &builder;
64
65 /// A jump destination is an abstract label, branching to which may
66 /// require a jump out through normal cleanups.
67 struct JumpDest {
68 JumpDest() = default;
69 JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {},
70 unsigned index = 0)
71 : block(block) {}
72
73 bool isValid() const { return block != nullptr; }
74 mlir::Block *getBlock() const { return block; }
75 EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; }
76 unsigned getDestIndex() const { return index; }
77
78 // This should be used cautiously.
79 void setScopeDepth(EHScopeStack::stable_iterator depth) {
80 scopeDepth = depth;
81 }
82
83 private:
84 mlir::Block *block = nullptr;
85 EHScopeStack::stable_iterator scopeDepth;
86 unsigned index;
87 };
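  // Illustrative sketch (not part of the upstream header), assuming `cgf` is
  // the active CIRGenFunction and `target` is a block in the current scope:
  //
  //   CIRGenFunction::JumpDest dest = cgf.getJumpDestInCurrentScope(target);
  //   if (dest.isValid())
  //     cgf.emitBranchThroughCleanup(loc, dest); // may route through cleanups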
88
89public:
90 /// The GlobalDecl for the current function being compiled or the global
91 /// variable currently being initialized.
92 clang::GlobalDecl curGD;
93
94 /// Unified return block.
95 /// In CIR this is a function because each scope might have
96 /// its associated return block.
97 JumpDest returnBlock(mlir::Block *retBlock) {
98 return getJumpDestInCurrentScope(retBlock);
99 }
100
101 unsigned nextCleanupDestIndex = 1;
102
103 /// The compiler-generated variable that holds the return value.
104 std::optional<mlir::Value> fnRetAlloca;
105
106 // Holds coroutine data if the current function is a coroutine. We use a
107 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
108 // in this header.
109 struct CGCoroInfo {
110 std::unique_ptr<CGCoroData> data;
111 CGCoroInfo();
112 ~CGCoroInfo();
113 };
114 CGCoroInfo curCoro;
115
116 bool isCoroutine() const { return curCoro.data != nullptr; }
117
118 /// The temporary alloca to hold the return value. This is
119 /// invalid iff the function has no return value.
120 Address returnValue = Address::invalid();
121
122 /// Tracks function scope overall cleanup handling.
123 EHScopeStack ehStack;
124
126
127 /// A mapping from NRVO variables to the flags used to indicate
128 /// when the NRVO has been applied to this variable.
129 llvm::DenseMap<const VarDecl *, mlir::Value> nrvoFlags;
130
131 llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
132 lambdaCaptureFields;
133 clang::FieldDecl *lambdaThisCaptureField = nullptr;
134
135 /// CXXThisDecl - When generating code for a C++ member function,
136 /// this will hold the implicit 'this' declaration.
137 ImplicitParamDecl *cxxabiThisDecl = nullptr;
138 mlir::Value cxxabiThisValue = nullptr;
139 mlir::Value cxxThisValue = nullptr;
140 clang::CharUnits cxxThisAlignment;
141
142 /// When generating code for a constructor or destructor, this will hold the
143 /// implicit argument (e.g. VTT).
144 ImplicitParamDecl *cxxStructorImplicitParamDecl = nullptr;
145 mlir::Value cxxStructorImplicitParamValue = nullptr;
146
147 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within this
148 /// expression.
149 Address cxxDefaultInitExprThis = Address::invalid();
150
151 // Holds the Decl for the current outermost non-closure context
152 const clang::Decl *curFuncDecl = nullptr;
153 /// This is the inner-most code context, which includes blocks.
154 const clang::Decl *curCodeDecl = nullptr;
155
156 /// The current function or global initializer that is generated code for.
157 /// This is usually a cir::FuncOp, but it can also be a cir::GlobalOp for
158 /// global initializers.
159 mlir::Operation *curFn = nullptr;
160
161 /// Save Parameter Decl for coroutine.
163
164 using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
165 /// This keeps track of the CIR allocas or globals for local C
166 /// declarations.
167 DeclMapTy localDeclMap;
168
169 /// The type of the condition for the switch statement being emitted.
171
172 clang::ASTContext &getContext() const { return cgm.getASTContext(); }
173
174 CIRGenBuilderTy &getBuilder() { return builder; }
175
176 CIRGenModule &getCIRGenModule() { return cgm; }
177 const CIRGenModule &getCIRGenModule() const { return cgm; }
178
179 mlir::Block *getCurFunctionEntryBlock() {
180 // We currently assume this isn't called for a global initializer.
181 auto fn = mlir::cast<cir::FuncOp>(curFn);
182 return &fn.getRegion().front();
183 }
184
185 /// Sanitizers enabled for this function.
186 clang::SanitizerSet sanOpts;
187
188 /// The symbol table maps a variable name to a value in the current scope.
189 /// Entering a function creates a new scope, and the function arguments are
190 /// added to the mapping. When the processing of a function is terminated,
191 /// the scope is destroyed and the mappings created in this scope are
192 /// dropped.
193 using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
194 SymTableTy symbolTable;
195
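  // Illustrative sketch (not part of the upstream header) of the scoped
  // discipline described above; `d` and `v` are a hypothetical declaration
  // and its value:
  //
  //   {
  //     llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>
  //         varScope(symbolTable);
  //     symbolTable.insert(d, v); // visible only while varScope is alive
  //   } // mappings added inside the scope are dropped here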
196 /// Whether a cir.stacksave operation has been added. Used to avoid
197 /// inserting cir.stacksave for multiple VLAs in the same scope.
198 bool didCallStackSave = false;
199
200 /// Whether or not a Microsoft-style asm block has been processed within
201 /// this function. These can potentially set the return value.
202 bool sawAsmBlock = false;
203
204 mlir::Type convertTypeForMem(QualType t);
205
206 mlir::Type convertType(clang::QualType t);
207 mlir::Type convertType(const TypeDecl *t) {
208 return convertType(getContext().getTypeDeclType(t));
209 }
210
211 /// Get integer from a mlir::Value that is an int constant or a constant op.
212 static int64_t getSExtIntValueFromConstOp(mlir::Value val) {
213 auto constOp = val.getDefiningOp<cir::ConstantOp>();
214 assert(constOp && "getSExtIntValueFromConstOp call with non ConstantOp");
215 return constOp.getIntValue().getSExtValue();
216 }
217
218 /// Get zero-extended integer from a mlir::Value that is an int constant or a
219 /// constant op.
220 static int64_t getZExtIntValueFromConstOp(mlir::Value val) {
221 auto constOp = val.getDefiningOp<cir::ConstantOp>();
222 assert(constOp && "getZExtIntValueFromConstOp call with non ConstantOp");
223 return constOp.getIntValue().getZExtValue();
224 }
225
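  // Illustrative sketch (not part of the upstream header): both helpers assume
  // `val` was produced by a cir::ConstantOp holding an integer attribute.
  //
  //   int64_t shiftAmount = CIRGenFunction::getSExtIntValueFromConstOp(val);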
226 /// Return the cir::TypeEvaluationKind of QualType \c type.
227 static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type);
228
232
236
237 CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
238 bool suppressNewContext = false);
239 ~CIRGenFunction();
240
241 CIRGenTypes &getTypes() const { return cgm.getTypes(); }
242
243 const TargetInfo &getTarget() const { return cgm.getTarget(); }
244 mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }
245
246 const TargetCIRGenInfo &getTargetHooks() const {
247 return cgm.getTargetCIRGenInfo();
248 }
249
250 // ---------------------
251 // Opaque value handling
252 // ---------------------
253
254 /// Keeps track of the current set of opaque value expressions.
255 llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
256 llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
257
258 // This keeps track of the associated size for each VLA type.
259 // We track this by the size expression rather than the type itself because
260 // in certain situations, like a const qualifier applied to a VLA typedef,
261 // multiple VLA types can share the same size expression.
262 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
263 // enter/leave scopes.
264 llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
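  // Illustrative sketch (not part of the upstream header): once
  // emitVariablyModifiedType has populated vlaSizeMap, the element count is
  // read back through getVLASize() (declared later), not from the map
  // directly:
  //
  //   auto vlaSize = getVLASize(vlaType); // {numElts, largest fixed type}
  //   mlir::Value numElements = vlaSize.numElts;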
265
266public:
267 /// A non-RAII class containing all the information about a bound
268 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
269 /// this which makes individual mappings very simple; using this
270 /// class directly is useful when you have a variable number of
271 /// opaque values or don't want the RAII functionality for some
272 /// reason.
273 class OpaqueValueMappingData {
274 const OpaqueValueExpr *opaqueValue;
275 bool boundLValue;
276
277 OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
278 : opaqueValue(ov), boundLValue(boundLValue) {}
279
280 public:
281 OpaqueValueMappingData() : opaqueValue(nullptr) {}
282
283 static bool shouldBindAsLValue(const Expr *expr) {
284 // gl-values should be bound as l-values for obvious reasons.
285 // Records should be bound as l-values because IR generation
286 // always keeps them in memory. Expressions of function type
287 // act exactly like l-values but are formally required to be
288 // r-values in C.
289 return expr->isGLValue() || expr->getType()->isFunctionType() ||
290 hasAggregateEvaluationKind(expr->getType());
291 }
292
293 static OpaqueValueMappingData
294 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
295 if (shouldBindAsLValue(ov))
296 return bind(cgf, ov, cgf.emitLValue(e));
297 return bind(cgf, ov, cgf.emitAnyExpr(e));
298 }
299
300 static OpaqueValueMappingData
301 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
302 assert(shouldBindAsLValue(ov));
303 cgf.opaqueLValues.insert(std::make_pair(ov, lv));
304 return OpaqueValueMappingData(ov, true);
305 }
306
307 static OpaqueValueMappingData
308 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
309 assert(!shouldBindAsLValue(ov));
310 cgf.opaqueRValues.insert(std::make_pair(ov, rv));
311
312 OpaqueValueMappingData data(ov, false);
313
314 // Work around an extremely aggressive peephole optimization in
315 // EmitScalarConversion which assumes that all other uses of a
316 // value are extant.
318 return data;
319 }
320
321 bool isValid() const { return opaqueValue != nullptr; }
322 void clear() { opaqueValue = nullptr; }
323
324 void unbind(CIRGenFunction &cgf) {
325 assert(opaqueValue && "no data to unbind!");
326
327 if (boundLValue) {
328 cgf.opaqueLValues.erase(opaqueValue);
329 } else {
330 cgf.opaqueRValues.erase(opaqueValue);
332 }
333 }
334 };
335
336 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
337 class OpaqueValueMapping {
338 CIRGenFunction &cgf;
339 OpaqueValueMappingData data;
340
341 public:
342 static bool shouldBindAsLValue(const Expr *expr) {
343 return OpaqueValueMappingData::shouldBindAsLValue(expr);
344 }
345
346 /// Build the opaque value mapping for the given conditional
347 /// operator if it's the GNU ?: extension. This is a common
348 /// enough pattern that the convenience operator is really
349 /// helpful.
350 ///
351 OpaqueValueMapping(CIRGenFunction &cgf,
352 const AbstractConditionalOperator *op)
353 : cgf(cgf) {
354 if (mlir::isa<ConditionalOperator>(op))
355 // Leave Data empty.
356 return;
357
358 const BinaryConditionalOperator *e =
359 mlir::cast<BinaryConditionalOperator>(op);
360 data = OpaqueValueMappingData::bind(cgf, e->getOpaqueValue(),
361 e->getCommon());
362 }
363
364 /// Build the opaque value mapping for an OpaqueValueExpr whose source
365 /// expression is set to the expression the OVE represents.
366 OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
367 : cgf(cgf) {
368 if (ov) {
369 assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
370 "for OVE with no source expression");
371 data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
372 }
373 }
374
375 OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue,
376 LValue lvalue)
377 : cgf(cgf),
378 data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}
379
380 OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue,
381 RValue rvalue)
382 : cgf(cgf),
383 data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}
384
385 void pop() {
386 data.unbind(cgf);
387 data.clear();
388 }
389
390 ~OpaqueValueMapping() {
391 if (data.isValid())
392 data.unbind(cgf);
393 }
394 };
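  // Illustrative sketch (not part of the upstream header): binding an
  // OpaqueValueExpr for the duration of one emission, assuming `ove` has a
  // source expression and `use` is a hypothetical expression referring to it:
  //
  //   {
  //     CIRGenFunction::OpaqueValueMapping mapping(cgf, ove);
  //     LValue lv = cgf.emitLValue(use); // reuses the bound value for `ove`
  //   } // the destructor unbinds the mapping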
395
396private:
397 /// Declare a variable in the current scope; it is expected that the
398 /// variable has not been declared in this scope yet.
399 void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
400 mlir::Location loc, clang::CharUnits alignment,
401 bool isParam = false);
402
403public:
404 mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);
405
406 void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);
407
408private:
409 // Track current variable initialization (if there's one)
410 const clang::VarDecl *currVarDecl = nullptr;
411 class VarDeclContext {
412 CIRGenFunction &p;
413 const clang::VarDecl *oldVal = nullptr;
414
415 public:
416 VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) {
417 if (p.currVarDecl)
418 oldVal = p.currVarDecl;
419 p.currVarDecl = value;
420 }
421
422 /// Can be used to restore the state early, before the dtor
423 /// is run.
424 void restore() { p.currVarDecl = oldVal; }
425 ~VarDeclContext() { restore(); }
426 };
427
428public:
429 /// Use to track source locations across nested visitor traversals.
430 /// Always use a `SourceLocRAIIObject` to change currSrcLoc.
431 std::optional<mlir::Location> currSrcLoc;
432 class SourceLocRAIIObject {
433 CIRGenFunction &cgf;
434 std::optional<mlir::Location> oldLoc;
435
436 public:
437 SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value) : cgf(cgf) {
438 if (cgf.currSrcLoc)
439 oldLoc = cgf.currSrcLoc;
440 cgf.currSrcLoc = value;
441 }
442
443 /// Can be used to restore the state early, before the dtor
444 /// is run.
445 void restore() { cgf.currSrcLoc = oldLoc; }
446 ~SourceLocRAIIObject() { restore(); }
447 };
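  // Illustrative sketch (not part of the upstream header): scoping a source
  // location while emitting a sub-expression `e`:
  //
  //   {
  //     SourceLocRAIIObject srcLoc{*this, getLoc(e->getSourceRange())};
  //     emitIgnoredExpr(e); // operations built here pick up currSrcLoc
  //   } // previous currSrcLoc restored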
448
449 using SymTableScopeTy =
450 llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
451
452 /// Hold counters for incrementally naming temporaries
453 unsigned counterRefTmp = 0;
454 unsigned counterAggTmp = 0;
455 std::string getCounterRefTmpAsString();
456 std::string getCounterAggTmpAsString();
457
458 /// Helpers to convert Clang's SourceLocation to a MLIR Location.
459 mlir::Location getLoc(clang::SourceLocation srcLoc);
460 mlir::Location getLoc(clang::SourceRange srcLoc);
461 mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
462
463 const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
464
465 /// True if an insertion point is defined. If not, this indicates that the
466 /// current code being emitted is unreachable.
467 /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
468 /// since we don't yet force null insertion point to designate behavior (like
469 /// LLVM's codegen does) and we probably shouldn't.
470 bool haveInsertPoint() const {
471 return builder.getInsertionBlock() != nullptr;
472 }
473
474 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
475 // an ObjCMethodDecl.
476 struct PrototypeWrapper {
477 llvm::PointerUnion<const clang::FunctionProtoType *,
478 const clang::ObjCMethodDecl *>
479 p;
480
483 };
484
486
487 /// An abstract representation of regular/ObjC call/message targets.
488 class AbstractCallee {
489 /// The function declaration of the callee.
490 [[maybe_unused]] const clang::Decl *calleeDecl;
491
492 public:
493 AbstractCallee() : calleeDecl(nullptr) {}
494 AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {}
495
496 bool hasFunctionDecl() const {
497 return llvm::isa_and_nonnull<clang::FunctionDecl>(calleeDecl);
498 }
499
500 unsigned getNumParams() const {
501 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
502 return fd->getNumParams();
503 return llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_size();
504 }
505
506 const clang::ParmVarDecl *getParamDecl(unsigned I) const {
507 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
508 return fd->getParamDecl(I);
509 return *(llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_begin() +
510 I);
511 }
512 };
513
514 struct VlaSizePair {
515 mlir::Value numElts;
516 QualType type;
517
518 VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
519 };
520
521 /// Return the number of elements for a single dimension
522 /// for the given array type.
523 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
524
525 /// Returns an MLIR::Value+QualType pair that corresponds to the size,
526 /// in non-variably-sized elements, of a variable length array type,
527 /// plus that largest non-variably-sized element type. Assumes that
528 /// the type has already been emitted with emitVariablyModifiedType.
529 VlaSizePair getVLASize(const VariableArrayType *type);
530 VlaSizePair getVLASize(QualType type);
531
532 Address getAsNaturalAddressOf(Address addr, QualType pointeeTy);
533
534 mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType) {
535 return getAsNaturalAddressOf(addr, pointeeType).getBasePointer();
536 }
537
538 void finishFunction(SourceLocation endLoc);
539
540 /// Determine whether the given initializer is trivial in the sense
541 /// that it requires no code to be generated.
542 bool isTrivialInitializer(const Expr *init);
543
544 /// If the specified expression does not fold to a constant, or if it does but
545 /// contains a label, return false. If it constant folds return true and set
546 /// the boolean result in Result.
547 bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
548 bool allowLabels = false);
549 bool constantFoldsToSimpleInteger(const clang::Expr *cond,
550 llvm::APSInt &resultInt,
551 bool allowLabels = false);
552
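  // Illustrative sketch (not part of the upstream header): the usual
  // fold-before-emit pattern for a branch condition `cond`:
  //
  //   bool condValue;
  //   if (constantFoldsToBool(cond, condValue) && !containsLabel(thenStmt))
  //     ...; // emit only the branch selected by condValue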
553 /// Return true if the statement contains a label in it. If
554 /// this statement is not executed normally and it contains no label, then
555 /// we can just remove the code.
556 bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);
557
558 Address emitExtVectorElementLValue(LValue lv, mlir::Location loc);
559
560 class ConstantEmission {
561 // Cannot use mlir::TypedAttr directly here because of bit availability.
562 llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference;
563 ConstantEmission(mlir::TypedAttr c, bool isReference)
564 : valueAndIsReference(c, isReference) {}
565
566 public:
567 ConstantEmission() {}
568 static ConstantEmission forReference(mlir::TypedAttr c) {
569 return ConstantEmission(c, true);
570 }
571 static ConstantEmission forValue(mlir::TypedAttr c) {
572 return ConstantEmission(c, false);
573 }
574
575 explicit operator bool() const {
576 return valueAndIsReference.getOpaqueValue() != nullptr;
577 }
578
579 bool isReference() const { return valueAndIsReference.getInt(); }
580 LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const {
581 assert(isReference());
582 cgf.cgm.errorNYI(refExpr->getSourceRange(),
583 "ConstantEmission::getReferenceLValue");
584 return {};
585 }
586
587 mlir::TypedAttr getValue() const {
588 assert(!isReference());
589 return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer());
590 }
591 };
592
593 ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
594 ConstantEmission tryEmitAsConstant(const MemberExpr *me);
595
596 struct AutoVarEmission {
597 const clang::VarDecl *variable;
598 /// The address of the alloca for languages with explicit address space
599 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
600 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
601 /// as a global constant.
602 Address addr;
603
604 /// True if the variable is of aggregate type and has a constant
605 /// initializer.
606 bool isConstantAggregate = false;
607
608 /// True if the variable is a __block variable that is captured by an
609 /// escaping block.
610 bool isEscapingByRef = false;
611
612 /// True if the variable was emitted as an offload recipe, and thus doesn't
613 /// have the same sort of alloca initialization.
614 bool emittedAsOffload = false;
615
616 mlir::Value nrvoFlag{};
617
618 struct Invalid {};
619 AutoVarEmission(Invalid) : variable(nullptr), addr(Address::invalid()) {}
620
621 AutoVarEmission(const clang::VarDecl &variable)
622 : variable(&variable), addr(Address::invalid()) {}
623
624 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
625
626 bool wasEmittedAsGlobal() const { return !addr.isValid(); }
627
629
630 /// Returns the raw, allocated address, which is not necessarily
631 /// the address of the object itself. It is casted to default
632 /// address space for address space agnostic languages.
633 Address getAllocatedAddress() const { return addr; }
634
635 // Changes the stored address for the emission. This function should only
636 // be used in extreme cases, and isn't required to model normal AST
637 // initialization/variables.
638 void setAllocatedAddress(Address a) { addr = a; }
639
640 /// Returns the address of the object within this declaration.
641 /// Note that this does not chase the forwarding pointer for
642 /// __block decls.
643 Address getObjectAddress(CIRGenFunction &cgf) const {
644 if (!isEscapingByRef)
645 return addr;
646
648 return Address::invalid();
649 }
650 };
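  // Illustrative sketch (not part of the upstream header): the three-phase
  // protocol that produces and consumes an AutoVarEmission for a local
  // VarDecl `d` (see emitAutoVarAlloca and friends declared later):
  //
  //   AutoVarEmission emission = emitAutoVarAlloca(d);
  //   emitAutoVarInit(emission);
  //   emitAutoVarCleanups(emission);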
651
652 /// The given basic block lies in the current EH scope, but may be a
653 /// target of a potentially scope-crossing jump; get a stable handle
654 /// to which we can perform this jump later.
655 /// CIRGen: this mostly tracks state for figuring out the proper scope
656 /// information, no actual branches are emitted.
657 JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
658 return JumpDest(target, ehStack.getInnermostNormalCleanup(),
659 nextCleanupDestIndex++);
660 }
661 /// IndirectBranch - The first time an indirect goto is seen we create a block
662 /// reserved for the indirect branch. Unlike before, the actual 'indirectbr'
663 /// is emitted at the end of the function, once all block destinations have
664 /// been resolved.
665 mlir::Block *indirectGotoBlock = nullptr;
666
669
670 /// Perform the usual unary conversions on the specified expression and
671 /// compare the result against zero, returning an Int1Ty value.
672 mlir::Value evaluateExprAsBool(const clang::Expr *e);
673
674 cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d,
675 cir::GlobalOp gv,
676 cir::GetGlobalOp gvAddr);
677
678 /// Enter the cleanups necessary to complete the given phase of destruction
679 /// for a destructor. The end result should call destructors on members and
680 /// base classes in reverse order of their construction.
681 void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type);
682
683 /// Determines whether an EH cleanup is required to destroy a type
684 /// with the given destruction kind.
685 /// TODO(cir): could be shared with Clang LLVM codegen
686 bool needsEHCleanup(QualType::DestructionKind kind) {
687 switch (kind) {
688 case QualType::DK_none:
689 return false;
690 case QualType::DK_cxx_destructor:
691 case QualType::DK_objc_weak_lifetime:
692 case QualType::DK_nontrivial_c_struct:
693 return getLangOpts().Exceptions;
694 case QualType::DK_objc_strong_lifetime:
695 return getLangOpts().Exceptions &&
696 cgm.getCodeGenOpts().ObjCAutoRefCountExceptions;
697 }
698 llvm_unreachable("bad destruction kind");
699 }
700
704
706
707 /// Set the address of a local variable.
708 void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
709 assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
710 localDeclMap.insert({vd, addr});
711
712 // Add to the symbol table if not there already.
713 if (symbolTable.count(vd))
714 return;
715 symbolTable.insert(vd, addr.getPointer());
716 }
717
718 // Replaces the address of the local variable, if it exists. Else does the
719 // same thing as setAddrOfLocalVar.
720 void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
721 localDeclMap.insert_or_assign(vd, addr);
722 }
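  // Illustrative sketch (not part of the upstream header): registration and
  // later lookup of a local's address (getAddrOfLocalVar is declared below):
  //
  //   setAddrOfLocalVar(vd, emission.getAllocatedAddress());
  //   ...
  //   Address addr = getAddrOfLocalVar(vd); // asserts the decl was registered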
723
724 // A class to allow reverting changes to a var-decl's registration to the
725 // localDeclMap. This is used in cases where things are being inserted into
726 // the variable list but don't follow normal lookup/search rules, like in
727 // OpenACC recipe generation.
728 class DeclMapRevertingRAII {
729 CIRGenFunction &cgf;
730 const VarDecl *vd;
731 bool shouldDelete = false;
732 Address oldAddr = Address::invalid();
733
734 public:
735 DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
736 : cgf(cgf), vd(vd) {
737 auto mapItr = cgf.localDeclMap.find(vd);
738
739 if (mapItr != cgf.localDeclMap.end())
740 oldAddr = mapItr->second;
741 else
742 shouldDelete = true;
743 }
744
745 ~DeclMapRevertingRAII() {
746 if (shouldDelete)
747 cgf.localDeclMap.erase(vd);
748 else
749 cgf.localDeclMap.insert_or_assign(vd, oldAddr);
750 }
751 };
752
754
757
758 static bool
759 isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor);
760
767
770
773 void getVTablePointers(clang::BaseSubobject base,
774 const clang::CXXRecordDecl *nearestVBase,
775 clang::CharUnits offsetFromNearestVBase,
776 bool baseIsNonVirtualPrimaryBase,
777 const clang::CXXRecordDecl *vtableClass,
778 VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
779 /// Return the Value of the vtable pointer member pointed to by thisAddr.
780 mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
781 const clang::CXXRecordDecl *vtableClass);
782
783 /// Returns whether we should perform a type checked load when loading a
784 /// virtual function for virtual calls to members of RD. This is generally
785 /// true when both vcall CFI and whole-program-vtables are enabled.
786 bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd);
787
788 /// Source location information about the default argument or member
789 /// initializer expression we're evaluating, if any.
790 CurrentSourceLocExprScope curSourceLocExprScope;
791 using SourceLocExprScopeGuard =
792 CurrentSourceLocExprScope::SourceLocExprScopeGuard;
793
794 /// A scope within which we are constructing the fields of an object which
795 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
796 /// we need to evaluate the CXXDefaultInitExpr within the evaluation.
797 class FieldConstructionScope {
798 public:
799 FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr)
800 : cgf(cgf), oldCXXDefaultInitExprThis(cgf.cxxDefaultInitExprThis) {
801 cgf.cxxDefaultInitExprThis = thisAddr;
802 }
803 ~FieldConstructionScope() {
804 cgf.cxxDefaultInitExprThis = oldCXXDefaultInitExprThis;
805 }
806
807 private:
808 CIRGenFunction &cgf;
809 Address oldCXXDefaultInitExprThis;
810 };
811
812 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
813 /// is overridden to be the object under construction.
814 class CXXDefaultInitExprScope {
815 public:
816 CXXDefaultInitExprScope(CIRGenFunction &cgf, const CXXDefaultInitExpr *e)
817 : cgf(cgf), oldCXXThisValue(cgf.cxxThisValue),
818 oldCXXThisAlignment(cgf.cxxThisAlignment),
819 sourceLocScope(e, cgf.curSourceLocExprScope) {
820 cgf.cxxThisValue = cgf.cxxDefaultInitExprThis.getPointer();
821 cgf.cxxThisAlignment = cgf.cxxDefaultInitExprThis.getAlignment();
822 }
823 ~CXXDefaultInitExprScope() {
824 cgf.cxxThisValue = oldCXXThisValue;
825 cgf.cxxThisAlignment = oldCXXThisAlignment;
826 }
827
828 public:
829 CIRGenFunction &cgf;
830 mlir::Value oldCXXThisValue;
831 clang::CharUnits oldCXXThisAlignment;
832 SourceLocExprScopeGuard sourceLocScope;
833 };
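  // Illustrative sketch (not part of the upstream header): while constructing
  // the fields of the object at `thisAddr`, a CXXDefaultInitExpr is emitted
  // with 'this' temporarily rebound to the object under construction
  // (`defaultInitExpr`, `fieldAddr` and `quals` are hypothetical here):
  //
  //   FieldConstructionScope fcs(*this, thisAddr);
  //   ...
  //   CXXDefaultInitExprScope scope(*this, defaultInitExpr);
  //   emitAnyExprToMem(defaultInitExpr->getExpr(), fieldAddr, quals,
  //                    /*isInitializer=*/true);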
834
839
841 LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
842
843 /// Construct an address with the natural alignment of T. If a pointer to T
844 /// is expected to be signed, the pointer passed to this function must have
845 /// been signed, and the returned Address will have the pointer authentication
846 /// information needed to authenticate the signed pointer.
847 Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t,
848 CharUnits alignment,
849 bool forPointeeType = false,
850 LValueBaseInfo *baseInfo = nullptr) {
851 if (alignment.isZero())
852 alignment = cgm.getNaturalTypeAlignment(t, baseInfo);
853 return Address(ptr, convertTypeForMem(t), alignment);
854 }
855
856 Address getAddressOfBaseClass(
857 Address value, const CXXRecordDecl *derived,
858 llvm::iterator_range<CastExpr::path_const_iterator> path,
859 bool nullCheckValue, SourceLocation loc);
860
861 Address getAddressOfDerivedClass(
862 mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived,
863 llvm::iterator_range<CastExpr::path_const_iterator> path,
864 bool nullCheckValue);
865
866 /// Return the VTT parameter that should be passed to a base
867 /// constructor/destructor with virtual bases.
868 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
869 /// to ItaniumCXXABI.cpp together with all the references to VTT.
870 mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase,
871 bool delegating);
872
873 LValue makeAddrLValue(Address addr, QualType ty,
874 AlignmentSource source = AlignmentSource::Type) {
875 return makeAddrLValue(addr, ty, LValueBaseInfo(source));
876 }
877
878 LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo) {
879 return LValue::makeAddr(addr, ty, baseInfo);
880 }
881
882 void initializeVTablePointers(mlir::Location loc,
883 const clang::CXXRecordDecl *rd);
884 void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
885
887
888 /// Return the address of a local variable.
889 Address getAddrOfLocalVar(const clang::VarDecl *vd) {
890 auto it = localDeclMap.find(vd);
891 assert(it != localDeclMap.end() &&
892 "Invalid argument to getAddrOfLocalVar(), no decl!");
893 return it->second;
894 }
895
897 mlir::Type fieldType, unsigned index);
898
899 /// Given an opaque value expression, return its LValue mapping if it exists,
900 /// otherwise create one.
901 LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
902
903 /// Given an opaque value expression, return its RValue mapping if it exists,
904 /// otherwise create one.
905 RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
906
907 /// Load the value for 'this'. This function is only valid while generating
908 /// code for a C++ member function.
909 /// FIXME(cir): this should return a mlir::Value!
910 mlir::Value loadCXXThis() {
911 assert(cxxThisValue && "no 'this' value for this function");
912 return cxxThisValue;
913 }
914 Address loadCXXThisAddress();
915
916 /// Load the VTT parameter to base constructors/destructors that have
917 /// virtual bases. FIXME: Every place that calls LoadCXXVTT is something
918 /// that needs to be abstracted properly.
919 mlir::Value loadCXXVTT() {
920 assert(cxxStructorImplicitParamValue && "no VTT value for this function");
921 return cxxStructorImplicitParamValue;
922 }
923
924 /// Convert the given pointer to a complete class to the given direct base.
925 Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc,
926 Address value,
927 const CXXRecordDecl *derived,
928 const CXXRecordDecl *base,
929 bool baseIsVirtual);
930
931 /// Determine whether a return value slot may overlap some other object.
932 AggValueSlot::Overlap_t getOverlapForReturnValue() {
933 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
934 // class subobjects. These cases may need to be revisited depending on the
935 // resolution of the relevant core issue.
936 return AggValueSlot::DoesNotOverlap;
937 }
938
939 /// Determine whether a base class initialization may overlap some other
940 /// object.
941 AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd,
942 const CXXRecordDecl *baseRD,
943 bool isVirtual);
944
945 /// Get an appropriate 'undef' rvalue for the given type.
946 /// TODO: What's the equivalent for MLIR? Currently we're only using this for
947 /// void types so it just returns RValue::get(nullptr) but it'll need
948 /// addressed later.
949 RValue getUndefRValue(clang::QualType ty);
950
951 cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
952 cir::FuncType funcType);
953
954 clang::QualType buildFunctionArgList(clang::GlobalDecl gd,
955 FunctionArgList &args);
956
957 /// Emit the function prologue: declare function arguments in the symbol
958 /// table.
959 void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB,
960 const FunctionDecl *fd, SourceLocation bodyBeginLoc);
961
962 /// Emit code for the start of a function.
963 /// \param loc The location to be associated with the function.
964 /// \param startLoc The location of the function body.
965 void startFunction(clang::GlobalDecl gd, clang::QualType returnType,
966 cir::FuncOp fn, cir::FuncType funcType,
967 FunctionArgList args, clang::SourceLocation loc,
968 clang::SourceLocation startLoc);
969
970 /// Returns true if the aggregate type has a volatile member.
971 bool hasVolatileMember(QualType t) {
972 if (const auto *rd = t->getAsRecordDecl())
973 return rd->hasVolatileMember();
974 return false;
975 }
976
977 void populateUnwindResumeBlock(bool isCleanup, cir::TryOp tryOp);
979 cir::TryOp tryOp);
980
981 /// The cleanup depth enclosing all the cleanups associated with the
982 /// parameters.
983 EHScopeStack::stable_iterator prologueCleanupDepth;
984
986 void populateCatchHandlersIfRequired(cir::TryOp tryOp);
987
988 /// Takes the old cleanup stack size and emits the cleanup blocks
989 /// that have been added.
990 void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth);
991 void popCleanupBlock();
992
993 /// Push a cleanup to be run at the end of the current full-expression. Safe
994 /// against the possibility that we're currently inside a
995 /// conditionally-evaluated expression.
996 template <class T, class... As>
997 void pushFullExprCleanup(CleanupKind kind, As... a) {
998 // If we're not in a conditional branch, or if none of the
999 // arguments requires saving, then use the unconditional cleanup.
1000 if (!isInConditionalBranch())
1001 return ehStack.pushCleanup<T>(kind, a...);
1002
1003 cgm.errorNYI("pushFullExprCleanup in conditional branch");
1004 }
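  // Illustrative sketch (not part of the upstream header): pushing an
  // end-of-full-expression cleanup, where `DestroyTemporary` stands for a
  // hypothetical EHScopeStack::Cleanup subclass defined in the .cpp files:
  //
  //   pushFullExprCleanup<DestroyTemporary>(NormalAndEHCleanup, addr, type);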
1005
1006 /// Enters a new scope for capturing cleanups, all of which
1007 /// will be executed once the scope is exited.
1008 class RunCleanupsScope {
1009 EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
1010
1011 protected:
1012 bool performCleanup;
1013 bool oldDidCallStackSave;
1014
1015 private:
1016 RunCleanupsScope(const RunCleanupsScope &) = delete;
1017 void operator=(const RunCleanupsScope &) = delete;
1018
1019 protected:
1020 CIRGenFunction &cgf;
1021
1022 public:
1023 /// Enter a new cleanup scope.
1024 explicit RunCleanupsScope(CIRGenFunction &cgf)
1025 : performCleanup(true), cgf(cgf) {
1026 cleanupStackDepth = cgf.ehStack.stable_begin();
1027 oldDidCallStackSave = cgf.didCallStackSave;
1028 cgf.didCallStackSave = false;
1029 oldCleanupStackDepth = cgf.currentCleanupStackDepth;
1030 cgf.currentCleanupStackDepth = cleanupStackDepth;
1031 }
1032
1033 /// Exit this cleanup scope, emitting any accumulated cleanups.
1034 ~RunCleanupsScope() {
1035 if (performCleanup)
1036 forceCleanup();
1037 }
1038
1039 /// Force the emission of cleanups now, instead of waiting
1040 /// until this object is destroyed.
1041 void forceCleanup() {
1042 assert(performCleanup && "Already forced cleanup");
1043 {
1044 mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
1045 cgf.didCallStackSave = oldDidCallStackSave;
1046 cgf.popCleanupBlocks(cleanupStackDepth);
1047 performCleanup = false;
1048 cgf.currentCleanupStackDepth = oldCleanupStackDepth;
1049 }
1050 }
1051 };
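  // Illustrative sketch (not part of the upstream header): the RAII pattern
  // for a full-expression or statement body:
  //
  //   {
  //     RunCleanupsScope cleanups(*this);
  //     emitIgnoredExpr(e); // may push cleanups onto ehStack
  //   } // ~RunCleanupsScope pops everything pushed inside the scope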
1052
1053 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1054 EHScopeStack::stable_iterator currentCleanupStackDepth;
1055
1056public:
1057 /// Represents a scope, including function bodies, compound statements, and
1058 /// the substatements of if/while/do/for/switch/try statements. This class
1059 /// handles any automatic cleanup, along with the return value.
1060 struct LexicalScope : public RunCleanupsScope {
1061 private:
1062 // Block containing cleanup code for things initialized in this
1063 // lexical context (scope).
1064 mlir::Block *cleanupBlock = nullptr;
1065
1066 // Points to the scope entry block. This is useful, for instance, for
1067 // helping to insert allocas before finalizing any recursive CodeGen from
1068 // switches.
1069 mlir::Block *entryBlock;
1070
1071 LexicalScope *parentScope = nullptr;
1072
1073 // Holds the actual value for ScopeKind::Try
1074 cir::TryOp tryOp = nullptr;
1075
1076 // On a coroutine body, the OnFallthrough sub stmt holds the handler
1077 // (CoreturnStmt) for control flow falling off the body. Keep track
1078 // of emitted co_return in this scope and allow OnFallthrough to be
1079 // skipped.
1080 bool hasCoreturnStmt = false;
1081
1082 // Only Regular is used at the moment. Support for other kinds will be
1083 // added as the relevant statements/expressions are upstreamed.
1084 enum Kind {
1085 Regular, // cir.if, cir.scope, if_regions
1086 Ternary, // cir.ternary
1087 Switch, // cir.switch
1088 Try, // cir.try
1089 GlobalInit // cir.global initialization code
1090 };
1091 Kind scopeKind = Kind::Regular;
1092
1093 // The scope return value.
1094 mlir::Value retVal = nullptr;
1095
1096 mlir::Location beginLoc;
1097 mlir::Location endLoc;
1098
1099 public:
1100 unsigned depth = 0;
1101
1102 LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
1103 : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
1104 beginLoc(loc), endLoc(loc) {
1105
1106 assert(entryBlock && "LexicalScope requires an entry block");
1107 cgf.curLexScope = this;
1108 if (parentScope)
1109 ++depth;
1110
1111 if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
1112 assert(fusedLoc.getLocations().size() == 2 && "too many locations");
1113 beginLoc = fusedLoc.getLocations()[0];
1114 endLoc = fusedLoc.getLocations()[1];
1115 }
1116 }
1117
1118 void setRetVal(mlir::Value v) { retVal = v; }
1119
1120 void cleanup();
1121 void restore() { cgf.curLexScope = parentScope; }
1122
1123
1124 ~LexicalScope() {
1125 cleanup();
1126 restore();
1127 }
1128
1129 // ---
1130 // Coroutine tracking
1131 // ---
1132 bool hasCoreturn() const { return hasCoreturnStmt; }
1133 void setCoreturn() { hasCoreturnStmt = true; }
1134
1135 // ---
1136 // Kind
1137 // ---
1138 bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
1139 bool isRegular() { return scopeKind == Kind::Regular; }
1140 bool isSwitch() { return scopeKind == Kind::Switch; }
1141 bool isTernary() { return scopeKind == Kind::Ternary; }
1142 bool isTry() { return scopeKind == Kind::Try; }
1143 cir::TryOp getClosestTryParent();
1144 void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
1145 void setAsSwitch() { scopeKind = Kind::Switch; }
1146 void setAsTernary() { scopeKind = Kind::Ternary; }
1147 void setAsTry(cir::TryOp op) {
1148 scopeKind = Kind::Try;
1149 tryOp = op;
1150 }
1151
1152 // Lazy create cleanup block or return what's available.
1153 mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
1154 if (cleanupBlock)
1155 return cleanupBlock;
1156 cleanupBlock = createCleanupBlock(builder);
1157 return cleanupBlock;
1158 }
1159
1160 cir::TryOp getTry() {
1161 assert(isTry());
1162 return tryOp;
1163 }
1164
1165 mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
1166 return cleanupBlock;
1167 }
1168
1169 mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
1170 // Create the cleanup block but don't hook it up just yet.
1171 mlir::OpBuilder::InsertionGuard guard(builder);
1172 mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
1173 : &cgf.curFn->getRegion(0);
1174 cleanupBlock = builder.createBlock(r);
1175 return cleanupBlock;
1176 }
1177
1178 // ---
1179 // Return handling.
1180 // ---
1181
1182 private:
1183 // On switches we need one return block per region, since cases don't
1184 // have their own scopes but are distinct regions nonetheless.
1185
1186 // TODO: This implementation should change once we have support for early
1187 // exits in MLIR structured control flow (llvm-project#161575)
1188 llvm::SmallVector<mlir::Block *> retBlocks;
1189 llvm::DenseMap<mlir::Block *, mlir::Location> retLocs;
1190 llvm::DenseMap<cir::CaseOp, unsigned> retBlockInCaseIndex;
1191 std::optional<unsigned> normalRetBlockIndex;
1192
1193 // There's usually only one ret block per scope, but this needs to be
1194 // get-or-create because of potentially unreachable return statements. Note
1195 // that for those, all source locations map to the first block found.
1196 mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1197 assert((isa_and_nonnull<cir::CaseOp>(
1198 cgf.builder.getBlock()->getParentOp()) ||
1199 retBlocks.size() == 0) &&
1200 "only switches can hold more than one ret block");
1201
1202 // Create the return block but don't hook it up just yet.
1203 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
1204 auto *b = cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
1205 retBlocks.push_back(b);
1206 updateRetLoc(b, loc);
1207 return b;
1208 }
1209
1210 cir::ReturnOp emitReturn(mlir::Location loc);
1211 void emitImplicitReturn();
1212
1213 public:
1214 llvm::ArrayRef<mlir::Block *> getRetBlocks() { return retBlocks; }
1215 mlir::Location getRetLoc(mlir::Block *b) { return retLocs.at(b); }
1216 void updateRetLoc(mlir::Block *b, mlir::Location loc) {
1217 retLocs.insert_or_assign(b, loc);
1218 }
1219
1220 mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1221 // Check if we're inside a case region
1222 if (auto caseOp = mlir::dyn_cast_if_present<cir::CaseOp>(
1223 cgf.builder.getBlock()->getParentOp())) {
1224 auto iter = retBlockInCaseIndex.find(caseOp);
1225 if (iter != retBlockInCaseIndex.end()) {
1226 // Reuse existing return block
1227 mlir::Block *ret = retBlocks[iter->second];
1228 updateRetLoc(ret, loc);
1229 return ret;
1230 }
1231 // Create new return block
1232 mlir::Block *ret = createRetBlock(cgf, loc);
1233 retBlockInCaseIndex[caseOp] = retBlocks.size() - 1;
1234 return ret;
1235 }
1236
1237 if (normalRetBlockIndex) {
1238 mlir::Block *ret = retBlocks[*normalRetBlockIndex];
1239 updateRetLoc(ret, loc);
1240 return ret;
1241 }
1242
1243 mlir::Block *ret = createRetBlock(cgf, loc);
1244 normalRetBlockIndex = retBlocks.size() - 1;
1245 return ret;
1246 }
1247
1248 mlir::Block *getEntryBlock() { return entryBlock; }
1249 };
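  // Illustrative sketch (not part of the upstream header): wrapping the body
  // of a nested cir.scope, where `scopeLoc` and `entryBlock` come from the
  // operation being built:
  //
  //   {
  //     LexicalScope lexScope(*this, scopeLoc, entryBlock);
  //     (void)emitStmt(body, /*useCurrentScope=*/true);
  //   } // ~LexicalScope runs cleanup() and then restore()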
1250
1251 LexicalScope *curLexScope = nullptr;
1252
1253 typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
1254
1255 static Destroyer destroyCXXObject;
1256
1257 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
1258 QualType type);
1259
1260 void pushDestroy(CleanupKind kind, Address addr, QualType type,
1261 Destroyer *destroyer);
1262
1263 Destroyer *getDestroyer(QualType::DestructionKind kind);
1264
1265 /// ----------------------
1266 /// CIR emit functions
1267 /// ----------------------
1268public:
1269 bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr,
1270 llvm::SmallVectorImpl<mlir::Value> &ops,
1271 clang::SVETypeFlags typeFlags);
1272 std::optional<mlir::Value>
1273 emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
1274 ReturnValueSlot returnValue,
1275 llvm::Triple::ArchType arch);
1276 std::optional<mlir::Value> emitAArch64SMEBuiltinExpr(unsigned builtinID,
1277 const CallExpr *expr);
1278 std::optional<mlir::Value> emitAArch64SVEBuiltinExpr(unsigned builtinID,
1279 const CallExpr *expr);
1280
1281 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
1282 SourceLocation loc,
1283 SourceLocation assumptionLoc,
1284 int64_t alignment,
1285 mlir::Value offsetValue = nullptr);
1286
1287 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
1288 SourceLocation assumptionLoc,
1289 int64_t alignment,
1290 mlir::Value offsetValue = nullptr);
1291
1292private:
1293 void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
1294 clang::CharUnits alignment);
1295
1296 CIRGenCallee emitDirectCallee(const GlobalDecl &gd);
1297
1298public:
1299 Address emitAddrOfFieldStorage(Address base, const clang::FieldDecl *field,
1300 llvm::StringRef fieldName,
1301 unsigned fieldIndex);
1302
1303 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1304 mlir::Location loc, clang::CharUnits alignment,
1305 bool insertIntoFnEntryBlock,
1306 mlir::Value arraySize = nullptr);
1307 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1308 mlir::Location loc, clang::CharUnits alignment,
1309 mlir::OpBuilder::InsertPoint ip,
1310 mlir::Value arraySize = nullptr);
1311
1312 void emitAggregateStore(mlir::Value value, Address dest);
1313
1314 void emitAggExpr(const clang::Expr *e, AggValueSlot slot);
1315
1317
1319
1320 /// Emit an aggregate copy.
1321 ///
1322 /// \param isVolatile \c true iff either the source or the destination is
1323 /// volatile.
1324 /// \param MayOverlap Whether the tail padding of the destination might be
1325 /// occupied by some other object. More efficient code can often be
1326 /// generated if not.
1327 void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
1328 AggValueSlot::Overlap_t mayOverlap,
1329 bool isVolatile = false);
1330
1331 /// Emit code to compute the specified expression which can have any type. The
1332 /// result is returned as an RValue struct. If this is an aggregate
1333 /// expression, the aggloc/agglocvolatile arguments indicate where the result
1334 /// should be returned.
1335 RValue emitAnyExpr(const clang::Expr *e,
1336 AggValueSlot aggSlot = AggValueSlot::ignored(),
1337 bool ignoreResult = false);
1338
1339 /// Emits the code necessary to evaluate an arbitrary expression into the
1340 /// given memory location.
1341 void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals,
1342 bool isInitializer);
1343
1344 /// Similar to emitAnyExpr(); however, the result will always be accessible
1345 /// even if no aggregate location is provided.
1346 RValue emitAnyExprToTemp(const clang::Expr *e);
1347
1348 void emitAnyExprToExn(const Expr *e, Address addr);
1349
1350 void emitArrayDestroy(mlir::Value begin, mlir::Value numElements,
1351 QualType elementType, CharUnits elementAlign,
1352 Destroyer *destroyer);
1353
1354 mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
1355 QualType &baseType, Address &addr);
1357
1359
1360 Address emitArrayToPointerDecay(const Expr *e,
1361 LValueBaseInfo *baseInfo = nullptr);
1362
1363 std::pair<mlir::Value, mlir::Type>
1364 emitAsmInputLValue(const TargetInfo::ConstraintInfo &info, LValue inputValue,
1365 QualType inputType, std::string &constraintString,
1366 SourceLocation loc);
1367 std::pair<mlir::Value, mlir::Type>
1368 emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr,
1369 std::string &constraintString);
1370 mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
1371
1372 RValue emitAtomicExpr(AtomicExpr *e);
1373 void emitAtomicInit(Expr *init, LValue dest);
1374 void emitAtomicStore(RValue rvalue, LValue dest, bool isInit);
1375 void emitAtomicStore(RValue rvalue, LValue dest, cir::MemOrder order,
1376 bool isVolatile, bool isInit);
1378 const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
1379 llvm::function_ref<void(cir::MemOrder)> emitAtomicOp);
1380
1381 AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
1382 mlir::OpBuilder::InsertPoint ip = {});
1383
1384 /// Emit code and set up symbol table for a variable declaration with auto,
1385 /// register, or no storage class specifier. These turn into simple stack
1386 /// objects, globals depending on target.
1387 void emitAutoVarDecl(const clang::VarDecl &d);
1388
1389 void emitAutoVarCleanups(const AutoVarEmission &emission);
1390 /// Emit the initializer for an allocated variable. If this call is not
1391 /// associated with the call to emitAutoVarAlloca (as the address of the
1392 /// emission is not directly an alloca), the allocatedSeparately parameter can
1393 /// be used to suppress the assertions. However, this should only be used in
1394 /// extreme cases, as it doesn't properly reflect the language/AST.
1395 void emitAutoVarInit(const AutoVarEmission &emission);
1396 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1397 clang::QualType::DestructionKind dtorKind);
1398
1399 void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
1400
1401 void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
1402 CXXCtorInitializer *baseInit);
1403
1405
1406 cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest);
1407
1408 mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
1409
1410 RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
1411 const clang::CallExpr *e, ReturnValueSlot returnValue);
1412
1413 /// Returns a Value corresponding to the size of the given expression by
1414 /// emitting a `cir.objsize` operation.
1415 ///
1416 /// \param e The expression whose object size to compute
1417 /// \param type Determines the semantics of the object size computation.
1418 /// The type parameter is a 2-bit value where:
1419 /// bit 0 (type & 1): 0 = whole object, 1 = closest subobject
1420 /// bit 1 (type & 2): 0 = maximum size, 2 = minimum size
1421 /// \param resType The result type for the size value
1422 /// \param emittedE Optional pre-emitted pointer value. If non-null, we'll
1423 /// call `cir.objsize` on this value rather than emitting e.
1424 /// \param isDynamic If true, allows runtime evaluation via dynamic mode
1425 mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type,
1426 cir::IntType resType, mlir::Value emittedE,
1427 bool isDynamic);
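  // Illustrative sketch (not part of the upstream header): the 2-bit `type`
  // encoding documented above, e.g. __builtin_object_size(p, 2) asks for the
  // minimum size of the whole object:
  //
  //   mlir::Value size = emitBuiltinObjectSize(e, /*type=*/2, resType,
  //                                            /*emittedE=*/nullptr,
  //                                            /*isDynamic=*/false);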
1428
1429 mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e,
1430 unsigned type,
1431 cir::IntType resType,
1432 mlir::Value emittedE,
1433 bool isDynamic);
1434
1435 int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts);
1436
1438
1439 RValue emitCall(const CIRGenFunctionInfo &funcInfo,
1440 const CIRGenCallee &callee, ReturnValueSlot returnValue,
1441 const CallArgList &args, cir::CIRCallOpInterface *callOp,
1442 mlir::Location loc);
1443 RValue emitCall(const CIRGenFunctionInfo &funcInfo,
1444 const CIRGenCallee &callee, ReturnValueSlot returnValue,
1445 const CallArgList &args,
1446 cir::CIRCallOpInterface *callOrTryCall = nullptr) {
1447 assert(currSrcLoc && "source location must have been set");
1448 return emitCall(funcInfo, callee, returnValue, args, callOrTryCall,
1449 *currSrcLoc);
1450 }
1451
1452 RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
1453 const clang::CallExpr *e, ReturnValueSlot returnValue);
1454 void emitCallArg(CallArgList &args, const clang::Expr *e,
1455 clang::QualType argType);
1456 void emitCallArgs(
1457 CallArgList &args, PrototypeWrapper prototype,
1458 llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
1459 AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
1464
1465 template <typename T>
1466 mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
1467 mlir::ArrayAttr value,
1468 cir::CaseOpKind kind,
1469 bool buildingTopLevelCase);
1470
1471 mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
1472 mlir::Type condType,
1473 bool buildingTopLevelCase);
1474
1475 LValue emitCastLValue(const CastExpr *e);
1476
1477 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
1478 /// sanitizer is enabled, a runtime check is also emitted.
1479 mlir::Value emitCheckedArgForAssume(const Expr *e);
1480
1481 /// Emit a conversion from the specified complex type to the specified
1482 /// destination type, where the destination type is an LLVM scalar type.
1483 mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
1484 QualType dstTy, SourceLocation loc);
1485
1488
1490
1491 mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
1492 cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1493 cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1494 cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
1495 cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
1496 mlir::Value coroframeAddr);
1498
1499 void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
1500
1502
1503 mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
1504
1505 mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s);
1506
1507 void emitCXXConstructExpr(const clang::CXXConstructExpr *e,
1508 AggValueSlot dest);
1509
1510 void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
1511 const clang::ArrayType *arrayType,
1512 Address arrayBegin, const CXXConstructExpr *e,
1513 bool newPointerIsChecked,
1514 bool zeroInitialize = false);
1515 void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
1516 mlir::Value numElements, Address arrayBase,
1517 const CXXConstructExpr *e,
1518 bool newPointerIsChecked,
1519 bool zeroInitialize);
1520 void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
1521 clang::CXXCtorType type, bool forVirtualBase,
1522 bool delegating, AggValueSlot thisAVS,
1523 const clang::CXXConstructExpr *e);
1524
1525 void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
1526 clang::CXXCtorType type, bool forVirtualBase,
1527 bool delegating, Address thisAddr,
1528 CallArgList &args, clang::SourceLocation loc);
1529
1530 void emitCXXDeleteExpr(const CXXDeleteExpr *e);
1531
1532 void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type,
1533 bool forVirtualBase, bool delegating,
1534 Address thisAddr, QualType thisTy);
1535
1536 RValue emitCXXDestructorCall(GlobalDecl dtor, const CIRGenCallee &callee,
1537 mlir::Value thisVal, QualType thisTy,
1538 mlir::Value implicitParam,
1539 QualType implicitParamTy, const CallExpr *e);
1540
1541 mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
1542 llvm::ArrayRef<const Attr *> attrs = {});
1543
1544 RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e,
1545 ReturnValueSlot returnValue);
1546
1547 Address emitCXXMemberDataPointerAddress(
1548 const Expr *e, Address base, mlir::Value memberPtr,
1549 const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo);
1550
1551 RValue emitCXXMemberOrOperatorCall(
1552 const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
1553 ReturnValueSlot returnValue, mlir::Value thisPtr,
1554 mlir::Value implicitParam, clang::QualType implicitParamTy,
1555 const clang::CallExpr *ce, CallArgList *rtlArgs);
1556
1557 RValue emitCXXMemberOrOperatorMemberCallExpr(
1558 const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
1559 ReturnValueSlot returnValue, bool hasQualifier,
1560 clang::NestedNameSpecifier qualifier, bool isArrow,
1561 const clang::Expr *base);
1562
1565
1566 mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
1567
1568 void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
1569 mlir::Type elementTy, Address beginPtr,
1570 mlir::Value numElements,
1571 mlir::Value allocSizeWithoutCookie);
1572
1574 const CXXMethodDecl *md,
1576
1578
1580 const CallExpr *callExpr,
1582
1583 void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType,
1584 Address ptr);
1585
1586 void emitCXXThrowExpr(const CXXThrowExpr *e);
1587
1588 mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
1589
1590 mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s);
1591
1592 void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
1593 bool isFnTryBlock = false);
1594
1595 void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock = false);
1596
1597 void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor,
1598 clang::CXXCtorType ctorType, FunctionArgList &args);
1599
1600 // It's important not to confuse this and emitDelegateCXXConstructorCall.
1601 // Delegating constructors are the C++11 feature. The constructor delegate
1602 // optimization is used to reduce duplication in the base and complete
1603 // constructors where they are substantially the same.
1604 void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor,
1605 const FunctionArgList &args);
1606
1607 void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr,
1608 QualType deleteTy);
1609
1610 mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
1611
1612 mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
1613
1614 /// Emit an expression as an initializer for an object (variable, field, etc.)
1615 /// at the given location. The expression is not necessarily the normal
1616 /// initializer for the object, and the address is not necessarily
1617 /// its normal location.
1618 ///
1619 /// \param init the initializing expression
1620 /// \param d the object to act as if we're initializing
1621 /// \param lvalue the lvalue to initialize
1622 /// \param capturedByInit true if \p d is a __block variable whose address is
1623 /// potentially changed by the initializer
1624 void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
1625 LValue lvalue, bool capturedByInit = false);
1626
1627 mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
1628
1629 mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
1630
1631 mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s);
1632
1634
1635 void emitInitializerForField(clang::FieldDecl *field, LValue lhs,
1636 clang::Expr *init);
1637
1639
1640 mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);
1641
1642 mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
1643
1644 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
1645
1646 void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty);
1647
1648 mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee,
1649 llvm::ArrayRef<mlir::Value> args = {});
1650
1651 void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc);
1652
1653 /// Emit the computation of the specified expression of scalar type.
1654 mlir::Value emitScalarExpr(const clang::Expr *e,
1655 bool ignoreResultAssign = false);
1656
1657 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
1658 cir::UnaryOpKind kind, bool isPre);
1659
1660 /// Build a debug stoppoint if we are emitting debug info.
1661 void emitStopPoint(const Stmt *s);
1662
1663 // Build CIR for a statement. useCurrentScope should be true if no
1664 // new scopes need be created when finding a compound statement.
1665 mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope,
1666 llvm::ArrayRef<const Attr *> attrs = {});
1667
1668 mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
1669 bool useCurrentScope);
1670
1671 mlir::LogicalResult emitForStmt(const clang::ForStmt &s);
1672
1673 void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator,
1674 CallArgList &callArgs);
1675
1676 RValue emitCoawaitExpr(const CoawaitExpr &e,
1677 AggValueSlot aggSlot = AggValueSlot::ignored(),
1678 bool ignoreResult = false);
1679
1680 RValue emitCoyieldExpr(const CoyieldExpr &e,
1681 AggValueSlot aggSlot = AggValueSlot::ignored(),
1682 bool ignoreResult = false);
1683 /// Emit the computation of the specified expression of complex type,
1684 /// returning the result.
1685 mlir::Value emitComplexExpr(const Expr *e);
1686
1687 void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit);
1688
1689 mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv,
1690 cir::UnaryOpKind op, bool isPre);
1691
1695 mlir::Value &result);
1696
1697 mlir::LogicalResult
1698 emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
1699 AggValueSlot slot = AggValueSlot::ignored());
1700
1701 mlir::LogicalResult
1702 emitCompoundStmtWithoutScope(const clang::CompoundStmt &s,
1703 Address *lastValue = nullptr,
1704 AggValueSlot slot = AggValueSlot::ignored());
1705
1706 void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
1707 mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
1708 LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
1709
1710 mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
1711 mlir::Type condType,
1712 bool buildingTopLevelCase);
1713
1715 clang::CXXCtorType ctorType,
1716 const FunctionArgList &args,
1718
1719 /// We are performing a delegate call; that is, the current function is
1720 /// delegating to another one. Produce a r-value suitable for passing the
1721 /// given parameter.
1722 void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
1723 clang::SourceLocation loc);
1724
1725 /// Emit an `if` on a boolean condition to the specified blocks.
1726 /// FIXME: Based on the condition, this might try to simplify the codegen of
1727 /// the conditional based on the branch.
1728 /// In the future, we may apply code generation simplifications here,
1729 /// similar to those used in classic LLVM codegen
1730 /// See `EmitBranchOnBoolExpr` for inspiration.
1731 mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
1732 const clang::Stmt *thenS,
1733 const clang::Stmt *elseS);
1734 cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
1735 BuilderCallbackRef thenBuilder,
1736 mlir::Location thenLoc,
1737 BuilderCallbackRef elseBuilder,
1738 std::optional<mlir::Location> elseLoc = {});
1739
1740 mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
1741
1742 LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e);
1743
1744 mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
1745 mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
1746
1747 void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md);
1748 void emitLambdaStaticInvokeBody(const CXXMethodDecl *md);
1749
1750 void populateCatchHandlers(cir::TryOp tryOp);
1751
1752 mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
1753
1754 /// Emit code to compute the specified expression,
1755 /// ignoring the result.
1756 void emitIgnoredExpr(const clang::Expr *e);
1757
1758 RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc);
1759
1760 /// Load a complex number from the specified l-value.
1761 mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc);
1762
1763 RValue emitLoadOfExtVectorElementLValue(LValue lv);
1764
1765 /// Given an expression that represents a value lvalue, this method emits
1766 /// the address of the lvalue, then loads the result as an rvalue,
1767 /// returning the rvalue.
1768 RValue emitLoadOfLValue(LValue lv, SourceLocation loc);
1769
1770 Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
1771 LValueBaseInfo *pointeeBaseInfo);
1772 LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
1773 QualType refTy, AlignmentSource source);
1774
1775 /// EmitLoadOfScalar - Load a scalar value from an address, taking
1776 /// care to appropriately convert from the memory representation to
1777 /// the LLVM value representation. The l-value must be a simple
1778 /// l-value.
1779 mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);
1780 mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, QualType ty,
1781 SourceLocation loc, LValueBaseInfo baseInfo);
1782
1783 /// Emit code to compute a designator that specifies the location
1784 /// of the expression.
1785 /// FIXME: document this function better.
1786 LValue emitLValue(const clang::Expr *e);
1787 LValue emitLValueForBitField(LValue base, const FieldDecl *field);
1788 LValue emitLValueForField(LValue base, const clang::FieldDecl *field);
1789
1790 LValue emitLValueForLambdaField(const FieldDecl *field);
1791 LValue emitLValueForLambdaField(const FieldDecl *field,
1792 mlir::Value thisValue);
1793
1794 /// Like emitLValueForField, except that if the field is a reference, this
1795 /// will return the address of the reference and not the address of the value
1796 /// stored in the reference.
1797 LValue emitLValueForFieldInitialization(LValue base,
1798 const clang::FieldDecl *field,
1799 llvm::StringRef fieldName);
1800
1801 LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
1802
1803 LValue emitMemberExpr(const MemberExpr *e);
1804
1805 LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
1806
1807 LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
1808
1809 /// Given an expression with a pointer type, emit the value and compute our
1810 /// best estimate of the alignment of the pointee.
1811 ///
1812 /// One reasonable way to use this information is when there's a language
1813 /// guarantee that the pointer must be aligned to some stricter value, and
1814 /// we're simply trying to ensure that sufficiently obvious uses of under-
1815 /// aligned objects don't get miscompiled; for example, a placement new
1816 /// into the address of a local variable. In such a case, it's quite
1817 /// reasonable to just ignore the returned alignment when it isn't from an
1818 /// explicit source.
1819 Address emitPointerWithAlignment(const clang::Expr *expr,
1820 LValueBaseInfo *baseInfo = nullptr);
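 /// A minimal sketch of the call pattern described above (the names `cgf` and
 /// `ptrExpr` are assumptions, not part of this header):
 /// \code
 ///   LValueBaseInfo baseInfo;
 ///   Address addr = cgf.emitPointerWithAlignment(ptrExpr, &baseInfo);
 ///   // addr.getAlignment() is a best-effort estimate; a caller relying on a
 ///   // stricter language guarantee may substitute its own alignment.
 /// \endcode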
1821
1822 /// Emits a reference binding to the passed in expression.
1823 RValue emitReferenceBindingToExpr(const Expr *e);
1824
1825 mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);
1826
1827 RValue emitRotate(const CallExpr *e, bool isRotateLeft);
1828
1829 mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);
1830
1831 /// Emit a conversion from the specified type to the specified destination
1832 /// type, both of which are CIR scalar types.
1833 mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
1834 clang::QualType dstType,
1835 clang::SourceLocation loc);
1836
1837 void emitScalarInit(const clang::Expr *init, mlir::Location loc,
1838 LValue lvalue, bool capturedByInit = false);
1839
1840 mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx,
1841 const Expr *argExpr);
1842
1843 void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage);
1844
1845 void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest,
1846 bool isInit);
1847
1848 void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
1849 clang::QualType ty, LValueBaseInfo baseInfo,
1850 bool isInit = false, bool isNontemporal = false);
1851 void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
1852
1853 /// Store the specified rvalue into the specified
1854 /// lvalue, where both are guaranteed to have the same type, and that type
1855 /// is 'Ty'.
1856 void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);
1857
1858 mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);
1859
1860 LValue emitStringLiteralLValue(const StringLiteral *e,
1861 llvm::StringRef name = ".str");
1862
1863 mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
1864 mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
1865 bool buildingTopLevelCase);
1866 mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
1867
1868 std::optional<mlir::Value>
1869 emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e,
1870 ReturnValueSlot &returnValue);
1871
1872 /// Given a value and its clang type, returns the value cast to its memory
1873 /// representation.
1874 /// Note: CIR defers most of the special casting to the final lowering passes
1875 /// to conserve the high level information.
1876 mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);
1877
1878 /// EmitFromMemory - Change a scalar value from its memory
1879 /// representation to its value representation.
1880 mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty);
1881
1882 /// Emit a trap instruction, which is used to abort the program in an abnormal
1883 /// way, usually for debugging purposes.
1884 /// \p createNewBlock indicates whether to create a new block for the IR
1885 /// builder. Since the `cir.trap` operation is a terminator, operations that
1886 /// follow a trap cannot be emitted after `cir.trap` in the same block. To
1887 /// ensure these operations get emitted successfully, you need to create a new
1888 /// dummy block and set the insertion point there before continuing from the
1889 /// trap operation.
1890 void emitTrap(mlir::Location loc, bool createNewBlock);
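 /// Hypothetical call site illustrating the `createNewBlock` note above
 /// (`cgf`, `loc`, and `restStmt` are assumed names, for illustration only):
 /// \code
 ///   cgf.emitTrap(loc, /*createNewBlock=*/true);
 ///   // Emission can continue: the builder now points at a fresh dummy block,
 ///   // so later operations are not appended after the cir.trap terminator.
 ///   (void)cgf.emitSimpleStmt(restStmt, /*useCurrentScope=*/true);
 /// \endcode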
1891
1892 LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
1893
1894 mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
1895
1896 /// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
1897 /// checking is enabled. Otherwise, just emit an unreachable instruction.
1898 /// \p createNewBlock indicates whether to create a new block for the IR
1899 /// builder. Since the `cir.unreachable` operation is a terminator, operations
1900 /// that follow an unreachable point cannot be emitted after `cir.unreachable`
1901 /// in the same block. To ensure these operations get emitted successfully,
1902 /// you need to create a dummy block and set the insertion point there before
1903 /// continuing from the unreachable point.
1904 void emitUnreachable(clang::SourceLocation loc, bool createNewBlock);
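 /// The pattern described above, done by hand (sketch only; assumes a
 /// CIRGenFunction `cgf` is in scope):
 /// \code
 ///   cgf.emitUnreachable(loc, /*createNewBlock=*/false);
 ///   // cir.unreachable terminated the current block; start a dummy block so
 ///   // any trailing (dead) operations still have somewhere to go.
 ///   auto &b = cgf.getBuilder();
 ///   b.createBlock(b.getBlock()->getParent());
 /// \endcode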
1905
1906 /// This method handles emission of any variable declaration
1907 /// inside a function, including static vars etc.
1908 void emitVarDecl(const clang::VarDecl &d);
1909
1910 void emitVariablyModifiedType(QualType ty);
1911
1912 mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
1913
1914 std::optional<mlir::Value> emitX86BuiltinExpr(unsigned builtinID,
1915 const CallExpr *expr);
1916
1917 /// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
1918 /// nonnull, if \p lhs is marked _Nonnull.
1919 void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
1920 clang::SourceLocation loc);
1921
1922 /// An object to manage conditionally-evaluated expressions.
1923 class ConditionalEvaluation {
1924 CIRGenFunction &cgf;
1925 mlir::OpBuilder::InsertPoint insertPt;
1926
1927 public:
1928 ConditionalEvaluation(CIRGenFunction &cgf)
1929 : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {}
1930 ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
1931 : cgf(cgf), insertPt(ip) {}
1932
1933 void begin() {
1934 assert(cgf.outermostConditional != this);
1935 if (!cgf.outermostConditional)
1936 cgf.outermostConditional = this;
1937 }
1938
1939 void end() {
1940 assert(cgf.outermostConditional != nullptr);
1941 if (cgf.outermostConditional == this)
1942 cgf.outermostConditional = nullptr;
1943 }
1944
1945 /// Returns the insertion point which will be executed prior to each
1946 /// evaluation of the conditional code. In LLVM OG, this method
1947 /// is called getStartingBlock.
1948 mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; }
1949 };
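 /// Sketch of the intended protocol (mirrors classic codegen's
 /// ConditionalEvaluation; the emission in the middle is a placeholder):
 /// \code
 ///   ConditionalEvaluation eval(*this);
 ///   eval.begin();
 ///   // ... emit one arm of the conditional; temporaries created here are
 ///   // known to be conditionally evaluated via outermostConditional ...
 ///   eval.end();
 /// \endcode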
1950
1951 struct ConditionalInfo {
1952 std::optional<LValue> lhs{}, rhs{};
1953 mlir::Value result{};
1954 };
1955
1956 // Return true if we're currently emitting one branch or the other of a
1957 // conditional expression.
1958 bool isInConditionalBranch() const { return outermostConditional != nullptr; }
1959
1960 void setBeforeOutermostConditional(mlir::Value value, Address addr) {
1961 assert(isInConditionalBranch());
1962 {
1963 mlir::OpBuilder::InsertionGuard guard(builder);
1964 builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
1965 builder.createStore(
1966 value.getLoc(), value, addr, /*isVolatile=*/false,
1967 mlir::IntegerAttr::get(
1968 mlir::IntegerType::get(value.getContext(), 64),
1969 (uint64_t)addr.getAlignment().getAsAlign().value()));
1970 }
1971 }
1972
1973 // Points to the outermost active conditional control. This is used so that
1974 // we know if a temporary should be destroyed conditionally.
1975 ConditionalEvaluation *outermostConditional = nullptr;
1976
1977 /// An RAII object to record that we're evaluating a statement
1978 /// expression.
1979 class StmtExprEvaluation {
1980 CIRGenFunction &cgf;
1981
1982 /// We have to save the outermost conditional: cleanups in a
1983 /// statement expression aren't conditional just because the
1984 /// StmtExpr is.
1985 ConditionalEvaluation *savedOutermostConditional;
1986
1987 public:
1988 StmtExprEvaluation(CIRGenFunction &cgf)
1989 : cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
1990 cgf.outermostConditional = nullptr;
1991 }
1992
1993 ~StmtExprEvaluation() {
1994 cgf.outermostConditional = savedOutermostConditional;
1995 }
1996 };
1997
1998 template <typename FuncTy>
1999 ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
2000 const FuncTy &branchGenFunc);
2001
2002 mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
2003 const clang::Stmt *thenS,
2004 const clang::Stmt *elseS);
2005
2006 /// Build a "reference" to a va_list; this is either the address or the value
2007 /// of the expression, depending on how va_list is defined.
2008 Address emitVAListRef(const Expr *e);
2009
2010 /// Emits the start of a CIR variable-argument operation (`cir.va_start`)
2011 ///
2012 /// \param vaList A reference to the \c va_list as emitted by either
2013 /// \c emitVAListRef or \c emitMSVAListRef.
2014 ///
2015 /// \param count The number of arguments in \c vaList
2016 void emitVAStart(mlir::Value vaList, mlir::Value count);
2017
2018 /// Emits the end of a CIR variable-argument operation (`cir.va_end`)
2019 ///
2020 /// \param vaList A reference to the \c va_list as emitted by either
2021 /// \c emitVAListRef or \c emitMSVAListRef.
2022 void emitVAEnd(mlir::Value vaList);
2023
2024 /// Generate code to get an argument from the passed in pointer
2025 /// and update it accordingly.
2026 ///
2027 /// \param ve The \c VAArgExpr for which to generate code.
2028 ///
2029 /// The reference to the underlying \c va_list (as produced by
2030 /// \c emitVAListRef or \c emitMSVAListRef) is derived from \p ve.
2031 ///
2032 /// \returns SSA value with the argument.
2033 mlir::Value emitVAArg(VAArgExpr *ve);
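 /// End-to-end sketch of the va_* helpers above (illustrative only; the names
 /// `cgf`, `vaListExpr`, `countValue`, and `vaArgExpr` are assumed inputs):
 /// \code
 ///   Address vaList = cgf.emitVAListRef(vaListExpr);
 ///   cgf.emitVAStart(vaList.getPointer(), countValue);
 ///   mlir::Value arg = cgf.emitVAArg(vaArgExpr); // reads the next argument
 ///   cgf.emitVAEnd(vaList.getPointer());
 /// \endcode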
2034
2035 /// ----------------------
2036 /// CIR build helpers
2037 /// ----------------------
2038public:
2039 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2040 const Twine &name = "tmp",
2041 mlir::Value arraySize = nullptr,
2042 bool insertIntoFnEntryBlock = false);
2043 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2044 const Twine &name = "tmp",
2045 mlir::OpBuilder::InsertPoint ip = {},
2046 mlir::Value arraySize = nullptr);
2047 Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
2048 const Twine &name = "tmp",
2049 mlir::Value arraySize = nullptr,
2050 Address *alloca = nullptr,
2051 mlir::OpBuilder::InsertPoint ip = {});
2052 Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
2053 mlir::Location loc,
2054 const Twine &name = "tmp",
2055 mlir::Value arraySize = nullptr,
2056 mlir::OpBuilder::InsertPoint ip = {});
2057
2058 /// Create a temporary memory object of the given type, with
2059 /// appropriate alignment and cast it to the default address space. Returns
2060 /// the original alloca instruction via \p alloca if it is not nullptr.
2061 Address createMemTemp(QualType t, mlir::Location loc,
2062 const Twine &name = "tmp", Address *alloca = nullptr,
2063 mlir::OpBuilder::InsertPoint ip = {});
2064 Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
2065 const Twine &name = "tmp", Address *alloca = nullptr,
2066 mlir::OpBuilder::InsertPoint ip = {});
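 /// For example (sketch; `cgf`, `ty`, and `loc` are assumed to be in scope):
 /// \code
 ///   Address rawAlloca = Address::invalid();
 ///   Address tmp = cgf.createMemTemp(ty, loc, "ref.tmp", &rawAlloca);
 ///   // `tmp` is cast to the default address space; `rawAlloca` receives the
 ///   // original alloca.
 /// \endcode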
2067
2068 //===--------------------------------------------------------------------===//
2069 // OpenMP Emission
2070 //===--------------------------------------------------------------------===//
2071public:
2072 mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s);
2073 mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s);
2074 mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s);
2075 mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s);
2076 mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s);
2077 mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s);
2078 mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s);
2079 mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s);
2080 mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s);
2081 mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s);
2082 mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s);
2083 mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s);
2084 mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s);
2085 mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s);
2086 mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s);
2087 mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s);
2088 mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s);
2089 mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s);
2090 mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s);
2091 mlir::LogicalResult
2092 emitOMPParallelForDirective(const OMPParallelForDirective &s);
2093 mlir::LogicalResult
2094 emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s);
2095 mlir::LogicalResult
2096 emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s);
2097 mlir::LogicalResult
2098 emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s);
2099 mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s);
2100 mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s);
2101 mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s);
2102 mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s);
2103 mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s);
2104 mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s);
2105 mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s);
2106 mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s);
2107 mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s);
2108 mlir::LogicalResult
2109 emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s);
2110 mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s);
2111 mlir::LogicalResult
2112 emitOMPTargetDataDirective(const OMPTargetDataDirective &s);
2113 mlir::LogicalResult
2114 emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s);
2115 mlir::LogicalResult
2116 emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s);
2117 mlir::LogicalResult
2118 emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s);
2119 mlir::LogicalResult
2120 emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s);
2121 mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s);
2122 mlir::LogicalResult
2123 emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s);
2124 mlir::LogicalResult
2125 emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s);
2126 mlir::LogicalResult
2127 emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s);
2128 mlir::LogicalResult
2129 emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s);
2130 mlir::LogicalResult
2131 emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s);
2132 mlir::LogicalResult
2133 emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s);
2134 mlir::LogicalResult
2135 emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s);
2136 mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(
2137 const OMPParallelMaskedTaskLoopDirective &s);
2138 mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(
2139 const OMPParallelMaskedTaskLoopSimdDirective &s);
2140 mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(
2141 const OMPParallelMasterTaskLoopDirective &s);
2142 mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(
2143 const OMPParallelMasterTaskLoopSimdDirective &s);
2144 mlir::LogicalResult
2145 emitOMPDistributeDirective(const OMPDistributeDirective &s);
2146 mlir::LogicalResult emitOMPDistributeParallelForDirective(
2147 const OMPDistributeParallelForDirective &s);
2148 mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(
2149 const OMPDistributeParallelForSimdDirective &s);
2150 mlir::LogicalResult
2151 emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s);
2152 mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(
2153 const OMPTargetParallelGenericLoopDirective &s);
2154 mlir::LogicalResult emitOMPTargetParallelForSimdDirective(
2155 const OMPTargetParallelForSimdDirective &s);
2156 mlir::LogicalResult
2157 emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s);
2158 mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(
2159 const OMPTargetTeamsGenericLoopDirective &s);
2160 mlir::LogicalResult
2161 emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s);
2162 mlir::LogicalResult
2163 emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s);
2164 mlir::LogicalResult
2165 emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s);
2166 mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(
2167 const OMPTeamsDistributeParallelForSimdDirective &s);
2168 mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(
2169 const OMPTeamsDistributeParallelForDirective &s);
2170 mlir::LogicalResult
2171 emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s);
2172 mlir::LogicalResult
2173 emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s);
2174 mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(
2175 const OMPTargetTeamsDistributeDirective &s);
2176 mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(
2177 const OMPTargetTeamsDistributeParallelForDirective &s);
2178 mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(
2179 const OMPTargetTeamsDistributeParallelForSimdDirective &s);
2180 mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(
2181 const OMPTargetTeamsDistributeSimdDirective &s);
2182 mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s);
2183 mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s);
2184 mlir::LogicalResult
2185 emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s);
2186 mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s);
2187 mlir::LogicalResult
2188 emitOMPInterchangeDirective(const OMPInterchangeDirective &s);
2189 mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s);
2190 mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s);
2191 mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s);
2192
2193 void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d);
2194 void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d);
2195 void emitOMPCapturedExpr(const OMPCapturedExprDecl &d);
2196 void emitOMPAllocateDecl(const OMPAllocateDecl &d);
2197 void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d);
2198 void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d);
2199 void emitOMPRequiresDecl(const OMPRequiresDecl &d);
2200
2201private:
2202 template <typename Op>
2203 void emitOpenMPClauses(Op &op, ArrayRef<const OMPClause *> clauses);
2204
2205 //===--------------------------------------------------------------------===//
2206 // OpenACC Emission
2207 //===--------------------------------------------------------------------===//
2208private:
2209 template <typename Op>
2210 Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind,
2211 llvm::ArrayRef<const OpenACCClause *> clauses);
2212 // Function to do the basic implementation of an operation with an Associated
2213 // Statement. Models AssociatedStmtConstruct.
2214 template <typename Op, typename TermOp>
2215 mlir::LogicalResult
2216 emitOpenACCOpAssociatedStmt(mlir::Location start, mlir::Location end,
2217 OpenACCDirectiveKind dirKind,
2218 llvm::ArrayRef<const OpenACCClause *> clauses,
2219 const Stmt *associatedStmt);
2220
2221 template <typename Op, typename TermOp>
2222 mlir::LogicalResult emitOpenACCOpCombinedConstruct(
2223 mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
2224 llvm::ArrayRef<const OpenACCClause *> clauses, const Stmt *loopStmt);
2225
2226 template <typename Op>
2227 void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind,
2228 ArrayRef<const OpenACCClause *> clauses);
2229 // The second template argument doesn't need to be a template parameter,
2230 // since it should always be an mlir::acc::LoopOp, but making it one lets
2231 // us avoid including the OpenACC MLIR headers here. We count on linker
2232 // failures and explicit instantiation to ensure we don't mess this up,
2233 // which is manageable because it is only called from one place and
2234 // instantiated three times.
2235 template <typename ComputeOp, typename LoopOp>
2236 void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp,
2237 OpenACCDirectiveKind dirKind,
2238 ArrayRef<const OpenACCClause *> clauses);
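 // Standalone illustration of that header-avoidance idiom (generic sketch,
 // not ClangIR code): the header declares a template without seeing the heavy
 // type, and the .cpp that includes the real headers instantiates it.
 //
 //   // In the header (no #include of the dialect):
 //   template <typename LoopOpTy>
 //   void attachClauses(LoopOpTy &loopOp);
 //
 //   // In the .cpp, after including the OpenACC dialect headers:
 //   template void attachClauses<mlir::acc::LoopOp>(mlir::acc::LoopOp &);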
2239
2240 // The OpenACC LoopOp requires that we have auto, seq, or independent on all
2241 // LoopOp operations for the 'none' device type case. This function checks if
2242 // the LoopOp has one, else it updates it to have one.
2243 void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan,
2244 OpenACCDirectiveKind dirKind);
2245
2246 // The OpenACC 'cache' construct actually applies to the 'loop' if present. So
2247 // keep track of the 'loop' so that we can add the cache vars to it correctly.
2248 mlir::acc::LoopOp *activeLoopOp = nullptr;
2249
2250 struct ActiveOpenACCLoopRAII {
2251 CIRGenFunction &cgf;
2252 mlir::acc::LoopOp *oldLoopOp;
2253
2254 ActiveOpenACCLoopRAII(CIRGenFunction &cgf, mlir::acc::LoopOp *newOp)
2255 : cgf(cgf), oldLoopOp(cgf.activeLoopOp) {
2256 cgf.activeLoopOp = newOp;
2257 }
2258 ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; }
2259 };
2260
2261 // Keep track of the last place we inserted a 'recipe' so that we can insert
2262 // the next one in lexical order.
2263 mlir::OpBuilder::InsertPoint lastRecipeLocation;
2264
2265public:
2266 // Helper type used to store the list of important information for a 'data'
2267 // clause variable, or a 'cache' variable reference.
2268 struct OpenACCDataOperandInfo {
2269 mlir::Location beginLoc;
2270 mlir::Value varValue;
2271 std::string name;
2272 // The type of the original variable reference: that is, after 'bounds' have
2273 // removed pointers/array types/etc. So in the case of int arr[5], and a
2274 // private(arr[1]), 'origType' is 'int', but 'baseType' is 'int[5]'.
2278 // The list of types that we found when going through the bounds, which we
2279 // can use to properly set the alloca section.
2281 };
2282
2283 // Gets the collection of info required to lower an OpenACC clause or cache
2284 // construct variable reference.
2285 OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e);
2286 // Helper function to emit the integer expressions as required by an OpenACC
2287 // clause/construct.
2288 mlir::Value emitOpenACCIntExpr(const Expr *intExpr);
2289 // Helper function to emit an integer constant as an mlir int type, used for
2290 // constants in OpenACC constructs/clauses.
2291 mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width,
2292 int64_t value);
2293
2294 mlir::LogicalResult
2295 emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s);
2296 mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
2297 mlir::LogicalResult
2298 emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s);
2299 mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
2300 mlir::LogicalResult
2301 emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s);
2302 mlir::LogicalResult
2303 emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s);
2304 mlir::LogicalResult
2305 emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s);
2306 mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
2307 mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
2308 mlir::LogicalResult
2309 emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s);
2310 mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
2311 mlir::LogicalResult
2312 emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s);
2313 mlir::LogicalResult
2314 emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s);
2315 mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);
2316
2317 void emitOpenACCDeclare(const OpenACCDeclareDecl &d);
2318 void emitOpenACCRoutine(const OpenACCRoutineDecl &d);
2319
2320 /// Create a temporary memory object for the given aggregate type.
2321 AggValueSlot createAggTemp(QualType ty, mlir::Location loc,
2322 const Twine &name = "tmp",
2323 Address *alloca = nullptr) {
2325 return AggValueSlot::forAddr(
2326 createMemTemp(ty, loc, name, alloca), ty.getQualifiers(),
2329 }
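 /// Typical call pattern (sketch only; `cgf`, `e`, `loc`, and `constructExpr`
 /// are assumed names):
 /// \code
 ///   AggValueSlot slot = cgf.createAggTemp(e->getType(), loc, "agg.tmp");
 ///   cgf.emitCXXConstructExpr(constructExpr, slot);
 /// \endcode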
2330
2331private:
2332 QualType getVarArgType(const Expr *arg);
2333};
2334
2335} // namespace clang::CIRGen
2336
2337#endif
Defines the clang::ASTContext interface.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, Address ptr, Address val1, Address val2, Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size, cir::MemOrder order, cir::SyncScopeKind scope)
Defines the clang::Expr interface and subclasses for C++ expressions.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
Defines an enumeration for C++ overloaded operators.
Enumerates target-specific builtins in their own namespaces within namespace clang.
C Language Family Type Representation.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
Represents a member of a struct/union/class.
Definition Decl.h:3160
This class represents a 'loop' construct. The 'loop' construct applies to a 'for' loop (or range-for ...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4353
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3723
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3269
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition Expr.h:6880
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition Expr.h:4453
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition Expr.h:4491
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition Expr.h:4488
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
BreakStmt - This represents a break.
Definition Stmt.h:3127
mlir::Value getPointer() const
Definition Address.h:95
static Address invalid()
Definition Address.h:73
clang::CharUnits getAlignment() const
Definition Address.h:135
mlir::Value getBasePointer() const
Definition Address.h:100
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
AbstractCallee(const clang::FunctionDecl *fd)
const clang::ParmVarDecl * getParamDecl(unsigned I) const
CXXDefaultInitExprScope(CIRGenFunction &cgf, const CXXDefaultInitExpr *e)
An object to manage conditionally-evaluated expressions.
ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
mlir::OpBuilder::InsertPoint getInsertPoint() const
Returns the insertion point which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forReference(mlir::TypedAttr c)
static ConstantEmission forValue(mlir::TypedAttr c)
LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const
DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr)
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, LValue lvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
RunCleanupsScope(CIRGenFunction &cgf)
Enter a new cleanup scope.
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void restore()
Can be used to restore the state early, before the dtor is run.
SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value)
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
static bool hasScalarEvaluationKind(clang::QualType type)
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
void emitOpenACCRoutine(const OpenACCRoutineDecl &d)
void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d, cir::GlobalOp gv, cir::GetGlobalOp gvAddr)
Add the initializer for 'd' to the global variable that has already been created for it.
mlir::LogicalResult emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard SourceLocExprScopeGuard
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d, mlir::OpBuilder::InsertPoint ip={})
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, clang::QualType::DestructionKind dtorKind)
Enter a destroy cleanup for the given local variable.
ImplicitParamDecl * cxxabiThisDecl
CXXThisDecl - When generating code for a C++ member function, this will hold the implicit 'this' decl...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
mlir::LogicalResult emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
mlir::LogicalResult emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &s)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
llvm::ScopedHashTable< const clang::Decl *, mlir::Value > SymTableTy
The symbol table maps a variable name to a value in the current scope.
void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc)
Definition CIRGenCXX.cpp:32
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s)
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator, CallArgList &callArgs)
mlir::Block * getCurFunctionEntryBlock()
void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp, bool isFnTryBlock=false)
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
void emitOMPRequiresDecl(const OMPRequiresDecl &d)
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::LogicalResult emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s)
void populateEHCatchRegions(EHScopeStack::stable_iterator scope, cir::TryOp tryOp)
Address cxxDefaultInitExprThis
The value of 'this' to sue when evaluating CXXDefaultInitExprs within this expression.
void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &s)
void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock=false)
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s)
void setBeforeOutermostConditional(mlir::Value value, Address addr)
mlir::LogicalResult emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s)
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::Value loadCXXThis()
Load the value for 'this'.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-vlaue withi the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
llvm::DenseMap< const clang::Decl *, Address > DeclMapTy
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
const clang::Decl * curFuncDecl
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
LValue emitLValueForLambdaField(const FieldDecl *field)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOrTryCall=nullptr)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
bool isTrivialInitializer(const Expr *init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
void emitOpenACCDeclare(const OpenACCDeclareDecl &d)
Address getAddrOfLocalVar(const clang::VarDecl *vd)
Return the address of a local variable.
void emitAnyExprToExn(const Expr *e, Address addr)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
Address getAsNaturalAddressOf(Address addr, QualType pointeeTy)
JumpDest returnBlock(mlir::Block *retBlock)
Unified return block.
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s)
mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase, bool delegating)
Return the VTT parameter that should be passed to a base constructor/destructor with virtual bases.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s)
void initializeVTablePointers(mlir::Location loc, const clang::CXXRecordDecl *rd)
mlir::Type convertType(const TypeDecl *t)
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
void initializeVTablePointer(mlir::Location loc, const VPtr &vptr)
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d)
void emitAggregateStore(mlir::Value value, Address dest)
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
ConditionalEvaluation * outermostConditional
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &s)
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl, CXXCtorInitializer *baseInit)
RValue emitAtomicExpr(AtomicExpr *e)
void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d, LValue lvalue, bool capturedByInit=false)
Emit an expression as an initializer for an object (variable, field, etc.) at the given location.
mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s)
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s)
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
void emitVAStart(mlir::Value vaList, mlir::Value count)
Emits the start of a CIR variable-argument operation (cir.va_start)
mlir::LogicalResult emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s)
VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass)
const TargetCIRGenInfo & getTargetHooks() const
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s)
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
RValue emitCoyieldExpr(const CoyieldExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s)
JumpDest getJumpDestInCurrentScope(mlir::Block *target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e)
mlir::LogicalResult emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s)
CleanupKind getCleanupKind(QualType::DestructionKind kind)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s)
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
std::pair< mlir::Value, mlir::Type > emitAsmInputLValue(const TargetInfo::ConstraintInfo &info, LValue inputValue, QualType inputType, std::string &constraintString, SourceLocation loc)
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::LogicalResult emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s)
void emitOMPAllocateDecl(const OMPAllocateDecl &d)
mlir::LogicalResult emitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &s)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType)
ImplicitParamDecl * cxxStructorImplicitParamDecl
When generating code for a constructor or destructor, this will hold the implicit argument (e....
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d)
mlir::LogicalResult emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType)
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
void populateCatchHandlersIfRequired(cir::TryOp tryOp)
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s)
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s)
mlir::LogicalResult emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s)
mlir::LogicalResult emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
static int64_t getZExtIntValueFromConstOp(mlir::Value val)
Get zero-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s)
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &s)
mlir::LogicalResult emitOMPInterchangeDirective(const OMPInterchangeDirective &s)
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s)
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
void populateUnwindResumeBlock(bool isCleanup, cir::TryOp tryOp)
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s)
LValue emitAggExprToLValue(const Expr *e)
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
clang::CurrentSourceLocExprScope curSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
mlir::Value loadCXXVTT()
Load the VTT parameter to base constructors/destructors have virtual bases.
void emitVarDecl(const clang::VarDecl &d)
This method handles emission of any variable declaration inside a function, including static vars etc...
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr, SmallVectorImpl< mlir::Value > &ops, clang::SVETypeFlags typeFlags)
Address returnValue
The temporary alloca to hold the return value.
LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo)
static int64_t getSExtIntValueFromConstOp(mlir::Value val)
Get integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPTargetDataDirective(const OMPTargetDataDirective &s)
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &s)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr, const clang::CXXRecordDecl *vtableClass)
Return the Value of the vtable pointer member pointed to by thisAddr.
void emitArrayDestroy(mlir::Value begin, mlir::Value numElements, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Destroys all the elements of the given array, beginning from last to first.
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::LogicalResult emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s)
RValue emitAnyExprToTemp(const clang::Expr *e)
Similarly to emitAnyExpr(), however, the result will always be accessible even if no aggregate locati...
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s)
mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS)
void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
llvm::SmallPtrSet< const clang::CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
bool hasVolatileMember(QualType t)
returns true if aggregate type has a volatile member.
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s)
void emitCallArg(CallArgList &args, const clang::Expr *e, clang::QualType argType)
clang::FieldDecl * lambdaThisCaptureField
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s)
void emitConstructorBody(FunctionArgList &args)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, excpet that if the Field is a reference, this will return the address of the...
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &s)
LValue emitCallExprLValue(const clang::CallExpr *e)
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s)
bool haveInsertPoint() const
True if an insertion point is defined.
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::LogicalResult emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s)
void emitAutoVarInit(const AutoVarEmission &emission)
Emit the initializer for an allocated variable.
void emitInitializerForField(clang::FieldDecl *field, LValue lhs, clang::Expr *init)
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::LogicalResult emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void maybeEmitDeferredVarDeclInit(const VarDecl *vd)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_start)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
std::optional< mlir::Value > emitAArch64SMEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
void pushStackRestore(CleanupKind kind, Address spMem)
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s)
void emitAutoVarDecl(const clang::VarDecl &d)
Emit code and set up symbol table for a variable declaration with auto, register, or no storage class...
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s)
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest)
Build an unconditional branch to the lexical scope cleanup block or to the labeled block if alread...
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
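A hedged sketch of the typical query, assuming ty is the QualType of an object being emitted; QualType::isDestructedType() is the standard way to obtain its DestructionKind:
  // Only arrange for an exception-path cleanup when the type needs
  // non-trivial destruction.
  clang::QualType::DestructionKind dtorKind = ty.isDestructedType();
  if (cgf.needsEHCleanup(dtorKind)) {
    // push an EH cleanup here (see pushFullExprCleanup below)
  }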
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s)
mlir::LogicalResult emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s)
CIRGenBuilderTy & getBuilder()
bool didCallStackSave
Whether a cir.stacksave operation has been added.
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
mlir::Value emitOpenACCIntExpr(const Expr *intExpr)
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitDestroy(Address addr, QualType type, Destroyer *destroyer)
Immediately perform the destruction of the given object.
const CIRGenModule & getCIRGenModule() const
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
llvm::DenseMap< const VarDecl *, mlir::Value > nrvoFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
Destroyer * getDestroyer(clang::QualType::DestructionKind kind)
void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty)
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
void emitAtomicInit(Expr *init, LValue dest)
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
mlir::LogicalResult emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
emitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
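A short sketch composing this with makeAddrLValue (also on this page); addr, ty and srcLoc are assumed to be in scope:
  // View a raw Address as an lvalue of type ty, then load it as a scalar with
  // the usual memory-to-value conversion applied.
  LValue lv = cgf.makeAddrLValue(addr, ty);
  mlir::Value loaded = cgf.emitLoadOfScalar(lv, srcLoc);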
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
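A sketch of the classic use: an arm of an if whose condition folds to a constant may only be skipped if it contains no label a goto could target. deadArm is an assumed const clang::Stmt *:
  // Safe to drop deadArm entirely only when nothing can jump into it.
  bool canSkipDeadArm = !cgf.containsLabel(deadArm);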
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width, int64_t value)
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d)
mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee, llvm::ArrayRef< mlir::Value > args={})
void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::LogicalResult emitOMPParallelForDirective(const OMPParallelForDirective &s)
RValue emitCoawaitExpr(const CoawaitExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
void emitCXXThrowExpr(const CXXThrowExpr *e)
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
mlir::LogicalResult emitOMPDistributeDirective(const OMPDistributeDirective &s)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
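A brief sketch contrasting this general entry point with the scalar-only path; e is an assumed const clang::Expr *:
  // emitAnyExpr handles scalar, complex and aggregate results uniformly;
  // emitScalarExpr is the narrower path when the type is known to be scalar.
  RValue anyResult = cgf.emitAnyExpr(e);
  mlir::Value scalarOnly = cgf.emitScalarExpr(e);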
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &s)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &s)
llvm::SmallVector< VPtr, 4 > VPtrsVector
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
bool sawAsmBlock
Whether or not a Microsoft-style asm block has been processed within this function.
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s)
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
std::pair< mlir::Value, mlir::Type > emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr, std::string &constraintString)
EHScopeStack::stable_iterator currentCleanupStackDepth
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
void pushFullExprCleanup(CleanupKind kind, As... a)
Push a cleanup to be run at the end of the current full-expression.
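A hypothetical sketch only: the exact EHScopeStack::Cleanup interface is not shown on this page, so the override below is an assumption about its shape; NormalCleanup is the CleanupKind listed further down, and resource is an assumed value:
  // Hypothetical cleanup that releases a resource when the enclosing
  // full-expression ends; the base-class hook name is assumed.
  struct ReleaseResource final : EHScopeStack::Cleanup {
    mlir::Value resource;
    ReleaseResource(mlir::Value r) : resource(r) {}
    void emit(CIRGenFunction &cgf) override { /* emit the release code */ }
  };
  cgf.pushFullExprCleanup<ReleaseResource>(NormalCleanup, resource);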
void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param, clang::SourceLocation loc)
We are performing a delegate call; that is, the current function is delegating to another one.
mlir::LogicalResult emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
void populateCatchHandlers(cir::TryOp tryOp)
std::optional< mlir::Location > currSrcLoc
Used to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
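A sketch of registering an explicitly created alloca; the Address constructor shape used here is an assumption, and vd, cirTy, loc, align and ip are assumed names:
  // Create the stack slot, then make it visible to later DeclRefExpr lowering.
  mlir::Value slot = cgf.emitAlloca(vd->getName(), cirTy, loc, align, ip);
  cgf.setAddrOfLocalVar(vd, Address(slot, cirTy, align)); // ctor shape assumed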
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if lhs is marked _Nonnull...
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
std::optional< mlir::Value > emitAArch64SVEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr)
void emitOMPCapturedExpr(const OMPCapturedExprDecl &d)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to have the same ...
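A sketch of a scalar assignment x = e decomposed into members on this page; RValue::get wrapping a single value is an assumption about CIRGenValue.h, and dre and rhsExpr are assumed names:
  // Lower the destination as an lvalue, the source as a scalar, then store.
  LValue lhs = cgf.emitDeclRefLValue(dre);        // x
  mlir::Value rhs = cgf.emitScalarExpr(rhsExpr);  // e
  cgf.emitStoreThroughLValue(RValue::get(rhs), lhs, /*isInit=*/false);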
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &s)
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth)
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
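A sketch of the save/emit/restore pattern; the ehStack member name and stable_begin() are assumptions about the cleanup stack interface:
  // Snapshot the depth, emit code that may push cleanups, then pop exactly
  // the cleanups added since the snapshot.
  EHScopeStack::stable_iterator depth = cgf.ehStack.stable_begin();
  // ... emit statements ...
  cgf.popCleanupBlocks(depth);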
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s)
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates an alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
emitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &s)
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s)
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment, and cast it to the defa...
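A sketch of materializing a temporary for a value of type ty; ty and loc are assumed to be in scope, and the temporary's name is arbitrary:
  // Allocate a correctly aligned temporary and view it as an lvalue.
  Address tmp = cgf.createMemTemp(ty, loc, "ref.tmp");
  LValue tmpLV = cgf.makeAddrLValue(tmp, ty);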
void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor, const FunctionArgList &args)
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, mlir::OpBuilder::InsertPoint ip, mlir::Value arraySize=nullptr)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s)
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s)
void popCleanupBlock()
Pops a cleanup block.
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
void emitAutoVarCleanups(const AutoVarEmission &emission)
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:48
A saved depth on the scope stack.
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:193
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:256
Represents a call to a C++ constructor.
Definition ExprCXX.h:1548
Represents a C++ constructor within a class.
Definition DeclCXX.h:2604
Represents a C++ base or member initializer.
Definition DeclCXX.h:2369
A default argument (C++ [dcl.fct.default]).
Definition ExprCXX.h:1270
A use of a default initializer in a constructor or in aggregate initialization.
Definition ExprCXX.h:1377
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2626
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:481
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:179
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2129
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2355
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:84
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2745
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
Represents a C++ temporary.
Definition ExprCXX.h:1459
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1208
CXXTryStmt - A C++ try block, including all handlers.
Definition StmtCXX.h:69
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2943
CaseStmt - Represent a case statement.
Definition Stmt.h:1912
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3676
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power-of-two 8-bit b...
Definition CharUnits.h:189
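A small example of the two CharUnits members listed above; CharUnits::fromQuantity is the usual way to construct one:
  clang::CharUnits eight = clang::CharUnits::fromQuantity(8);
  bool nonZero = !eight.isZero();          // true: eight bytes
  llvm::Align align = eight.getAsAlign();  // an 8-byte llvm::Align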
Represents a 'co_await' expression.
Definition ExprCXX.h:5369
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4300
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3605
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1732
ContinueStmt - This represents a continue.
Definition Stmt.h:3111
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition StmtCXX.h:473
Represents the body of a coroutine.
Definition StmtCXX.h:320
Represents a 'co_yield' expression.
Definition ExprCXX.h:5450
SourceLocExprScopeGuard(const Expr *DefaultExpr, CurrentSourceLocExprScope &Current)
Represents the current source location and context used to determine the value of the source location...
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1623
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2824
This represents one expression.
Definition Expr.h:112
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6564
Represents a member of a struct/union/class.
Definition Decl.h:3160
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2880
Represents a function declaration or definition.
Definition Decl.h:2000
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5269
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2961
IfStmt - This represents an if/then/else.
Definition Stmt.h:2251
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3000
Represents the declaration of a label.
Definition Decl.h:524
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2138
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3364
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3654
Represents a C++ nested name specifier, such as "::std::vector<int>::".
ObjCMethodDecl - Represents an instance or class method declaration.
Definition DeclObjC.h:140
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
Represents a parameter to a function.
Definition Decl.h:1790
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2005
A (possibly-)qualified type.
Definition TypeBase.h:937
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8332
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3152
Flags to identify the types for overloaded SVE builtins.
Encodes a location in the source.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2501
Exposes information about the current target.
Definition TargetInfo.h:226
Represents a declaration of a type.
Definition Decl.h:3513
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4957
Represents the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition Decl.h:712
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3967
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2689
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
const internal::VariadicDynCastAllOfMatcher< Stmt, CallExpr > callExpr
Matches call expressions.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicAllOfMatcher< Stmt > stmt
Matches statements.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
OpenACCDirectiveKind
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
OpenACCComputeConstruct(OpenACCDirectiveKind K, SourceLocation Start, SourceLocation DirectiveLoc, SourceLocation End, ArrayRef< const OpenACCClause * > Clauses, Stmt *StructuredBlock)
const FunctionProtoType * T
CXXDtorType
C++ destructor types.
Definition ABI.h:34
static bool aggValueSlot()
static bool peepholeProtection()
static bool opAllocaEscapeByReference()
static bool generateDebugInfo()
AutoVarEmission(const clang::VarDecl &variable)
bool isEscapingByRef
True if the variable is a __block variable that is captured by an escaping block.
Address addr
The address of the alloca for languages with explicit address space (e.g.
bool emittedAsOffload
True if the variable was emitted as an offload recipe, and thus doesn't have the same sort of alloca ...
bool isConstantAggregate
True if the variable is of aggregate type and has a constant initializer.
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
Address getObjectAddress(CIRGenFunction &cgf) const
Returns the address of the object within this declaration.
std::unique_ptr< CGCoroData > data
CXXDefaultArgExprScope(CIRGenFunction &cfg, const CXXDefaultArgExpr *e)
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
mlir::Block * createCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc)
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
void updateRetLoc(mlir::Block *b, mlir::Location loc)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateCleanupBlock(mlir::OpBuilder &builder)
mlir::Location getRetLoc(mlir::Block *b)
llvm::PointerUnion< const clang::FunctionProtoType *, const clang::ObjCMethodDecl * > p
PrototypeWrapper(const clang::ObjCMethodDecl *md)
PrototypeWrapper(const clang::FunctionProtoType *ft)
const clang::CXXRecordDecl * vtableClass
const clang::CXXRecordDecl * nearestVBase
VlaSizePair(mlir::Value num, QualType ty)