clang 23.0.0git
CIRGenFunction.h
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
14#define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
15
16#include "CIRGenBuilder.h"
17#include "CIRGenCall.h"
18#include "CIRGenModule.h"
19#include "CIRGenTypeCache.h"
20#include "CIRGenValue.h"
21#include "EHScopeStack.h"
22
23#include "Address.h"
24
27#include "clang/AST/CharUnits.h"
29#include "clang/AST/Decl.h"
30#include "clang/AST/ExprCXX.h"
31#include "clang/AST/Stmt.h"
32#include "clang/AST/Type.h"
38#include "llvm/ADT/ScopedHashTable.h"
39#include "llvm/IR/Instructions.h"
40
41namespace {
42class ScalarExprEmitter;
43} // namespace
44
45namespace mlir {
46namespace acc {
47class LoopOp;
48} // namespace acc
49} // namespace mlir
50
51namespace clang::CIRGen {
52
53struct CGCoroData;
54
56public:
58
59private:
60 friend class ::ScalarExprEmitter;
61 /// The builder is a helper class to create IR inside a function. The
62 /// builder is stateful, in particular it keeps an "insertion point": this
63 /// is where the next operations will be introduced.
64 CIRGenBuilderTy &builder;
65
66public:
67 /// The GlobalDecl for the current function being compiled or the global
68 /// variable currently being initialized.
70
72
73 /// The compiler-generated variable that holds the return value.
74 std::optional<mlir::Value> fnRetAlloca;
75
76 // Holds coroutine data if the current function is a coroutine. We use a
77 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
78 // in this header.
79 struct CGCoroInfo {
80 std::unique_ptr<CGCoroData> data;
81 CGCoroInfo();
83 };
85
86 bool isCoroutine() const { return curCoro.data != nullptr; }
87
88 /// The temporary alloca to hold the return value. This is
89 /// invalid iff the function has no return value.
91
92 /// Tracks function scope overall cleanup handling.
94
95 typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
96
97 /// A cleanup entry that will be promoted onto the EH scope stack at a later
98 /// point. Used by both the lifetime-extended cleanup stack (promoted when
99 /// the enclosing scope exits) and the deferred conditional cleanup stack
100 /// (promoted at the enclosing full-expression level).
101 ///
102 /// Currently only DestroyObject cleanups use this. When other cleanup types
103 /// are needed (e.g., CallLifetimeEnd), this struct can be extended with a
104 /// std::variant of cleanup data types.
112
114
116
117 /// A cleanup that was pushed to the EH stack but whose deactivation is
118 /// deferred until the enclosing CleanupDeactivationScope exits. Used to
119 /// protect partially-constructed aggregates (e.g. lambda captures) so that
120 /// already-initialized sub-objects are destroyed if a later initializer
121 /// throws, while avoiding double-destruction after full construction.
127
128 /// Scope that deactivates all enclosed deferred cleanups on exit.
129 /// Mirrors CodeGenFunction::CleanupDeactivationScope in classic codegen.
133 bool deactivated = false;
134
138
140 assert(!deactivated && "Deactivating already deactivated scope");
141 auto &stack = cgf.deferredDeactivationCleanupStack;
142 for (size_t i = stack.size(); i > oldDeactivateCleanupStackSize; i--) {
143 cgf.deactivateCleanupBlock(stack[i - 1].cleanup,
144 stack[i - 1].dominatingIP);
145 stack[i - 1].dominatingIP->erase();
146 }
147 stack.resize(oldDeactivateCleanupStackSize);
148 deactivated = true;
149 }
150
155 };
156
158
159 /// If a ParmVarDecl had the pass_object_size attribute, this will contain a
160 /// mapping from said ParmVarDecl to its implicit "object_size" parameter.
161 llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *>
163
164 /// A mapping from NRVO variables to the flags used to indicate
165 /// when the NRVO has been applied to this variable.
166 llvm::DenseMap<const VarDecl *, mlir::Value> nrvoFlags;
167
168 llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
171
172 /// CXXThisDecl - When generating code for a C++ member function,
173 /// this will hold the implicit 'this' declaration.
175 mlir::Value cxxabiThisValue = nullptr;
176 mlir::Value cxxThisValue = nullptr;
179
180 /// When generating code for a constructor or destructor, this will hold the
181 /// implicit argument (e.g. VTT).
184
 185 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within this
186 /// expression.
188
189 /// The values of function arguments to use when evaluating
190 /// CXXInheritedCtorInitExprs within this context.
192
193 /// The current array initialization index when evaluating an
194 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
195 mlir::Value arrayInitIndex = nullptr;
196
197 // Holds the Decl for the current outermost non-closure context
198 const clang::Decl *curFuncDecl = nullptr;
199 /// This is the inner-most code context, which includes blocks.
200 const clang::Decl *curCodeDecl = nullptr;
203
204 /// The current function or global initializer that is generated code for.
205 /// This is usually a cir::FuncOp, but it can also be a cir::GlobalOp for
206 /// global initializers.
207 mlir::Operation *curFn = nullptr;
208
209 /// Save Parameter Decl for coroutine.
211
212 using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
213 /// This keeps track of the CIR allocas or globals for local C
214 /// declarations.
216
217 /// The type of the condition for the emitting switch statement.
219
220 clang::ASTContext &getContext() const { return cgm.getASTContext(); }
221
222 CIRGenBuilderTy &getBuilder() { return builder; }
223
225 const CIRGenModule &getCIRGenModule() const { return cgm; }
226
228 // We currently assume this isn't called for a global initializer.
229 auto fn = mlir::cast<cir::FuncOp>(curFn);
230 return &fn.getRegion().front();
231 }
232
233 /// Sanitizers enabled for this function.
235
237 public:
241
242 private:
243 void ConstructorHelper(clang::FPOptions FPFeatures);
244 CIRGenFunction &cgf;
245 clang::FPOptions oldFPFeatures;
246 llvm::fp::ExceptionBehavior oldExcept;
247 llvm::RoundingMode oldRounding;
248 };
250
251 /// The symbol table maps a variable name to a value in the current scope.
252 /// Entering a function creates a new scope, and the function arguments are
253 /// added to the mapping. When the processing of a function is terminated,
254 /// the scope is destroyed and the mappings created in this scope are
255 /// dropped.
256 using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
258
259 /// Whether a cir.stacksave operation has been added. Used to avoid
260 /// inserting cir.stacksave for multiple VLAs in the same scope.
261 bool didCallStackSave = false;
262
263 /// Whether or not a Microsoft-style asm block has been processed within
 264 /// this function. These can potentially set the return value.
265 bool sawAsmBlock = false;
266
267 /// In C++, whether we are code generating a thunk. This controls whether we
268 /// should emit cleanups.
269 bool curFuncIsThunk = false;
270
271 mlir::Type convertTypeForMem(QualType t);
272
273 mlir::Type convertType(clang::QualType t);
274 mlir::Type convertType(const TypeDecl *t) {
275 return convertType(getContext().getTypeDeclType(t));
276 }
277
278 /// Get integer from a mlir::Value that is an int constant or a constant op.
279 static int64_t getSExtIntValueFromConstOp(mlir::Value val) {
280 auto constOp = val.getDefiningOp<cir::ConstantOp>();
281 assert(constOp && "getSExtIntValueFromConstOp call with non ConstantOp");
282 return constOp.getIntValue().getSExtValue();
283 }
284
285 /// Get zero-extended integer from a mlir::Value that is an int constant or a
286 /// constant op.
287 static int64_t getZExtIntValueFromConstOp(mlir::Value val) {
288 auto constOp = val.getDefiningOp<cir::ConstantOp>();
289 assert(constOp && "getZExtIntValueFromConstOp call with non ConstantOp");
290 return constOp.getIntValue().getZExtValue();
291 }
292
293 /// Return the cir::TypeEvaluationKind of QualType \c type.
295
299
303
305 bool suppressNewContext = false);
307
308 CIRGenTypes &getTypes() const { return cgm.getTypes(); }
309
310 const TargetInfo &getTarget() const { return cgm.getTarget(); }
311 mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }
312
314 return cgm.getTargetCIRGenInfo();
315 }
316
317 // ---------------------
318 // Opaque value handling
319 // ---------------------
320
321 /// Keeps track of the current set of opaque value expressions.
322 llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
323 llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
324
325 // This keeps track of the associated size for each VLA type.
326 // We track this by the size expression rather than the type itself because
327 // in certain situations, like a const qualifier applied to an VLA typedef,
328 // multiple VLA types can share the same size expression.
329 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
330 // enter/leave scopes.
331 llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
332
333public:
334 /// A non-RAII class containing all the information about a bound
335 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
336 /// this which makes individual mappings very simple; using this
337 /// class directly is useful when you have a variable number of
338 /// opaque values or don't want the RAII functionality for some
339 /// reason.
340 class OpaqueValueMappingData {
341 const OpaqueValueExpr *opaqueValue;
342 bool boundLValue;
343
344 OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
345 : opaqueValue(ov), boundLValue(boundLValue) {}
346
347 public:
348 OpaqueValueMappingData() : opaqueValue(nullptr) {}
349
350 static bool shouldBindAsLValue(const Expr *expr) {
351 // gl-values should be bound as l-values for obvious reasons.
352 // Records should be bound as l-values because IR generation
353 // always keeps them in memory. Expressions of function type
354 // act exactly like l-values but are formally required to be
355 // r-values in C.
356 return expr->isGLValue() || expr->getType()->isFunctionType() ||
358 }
359
361 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
362 if (shouldBindAsLValue(ov))
363 return bind(cgf, ov, cgf.emitLValue(e));
364 return bind(cgf, ov, cgf.emitAnyExpr(e));
365 }
366
368 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
369 assert(shouldBindAsLValue(ov));
370 cgf.opaqueLValues.insert(std::make_pair(ov, lv));
371 return OpaqueValueMappingData(ov, true);
372 }
373
375 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
376 assert(!shouldBindAsLValue(ov));
377 cgf.opaqueRValues.insert(std::make_pair(ov, rv));
378
379 OpaqueValueMappingData data(ov, false);
380
381 // Work around an extremely aggressive peephole optimization in
382 // EmitScalarConversion which assumes that all other uses of a
383 // value are extant.
385 return data;
386 }
387
/// True while this object tracks a bound opaque value.
bool isValid() const { return opaqueValue != nullptr; }
/// Forget the tracked value without touching the CGF maps; pair with a prior
/// unbind (see OpaqueValueMapping::pop).
void clear() { opaqueValue = nullptr; }
390
392 assert(opaqueValue && "no data to unbind!");
393
394 if (boundLValue) {
395 cgf.opaqueLValues.erase(opaqueValue);
396 } else {
397 cgf.opaqueRValues.erase(opaqueValue);
399 }
400 }
401 };
402
403 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
405 CIRGenFunction &cgf;
407
408 public:
412
413 /// Build the opaque value mapping for the given conditional
414 /// operator if it's the GNU ?: extension. This is a common
415 /// enough pattern that the convenience operator is really
416 /// helpful.
417 ///
420 : cgf(cgf) {
421 if (mlir::isa<ConditionalOperator>(op))
422 // Leave Data empty.
423 return;
424
426 mlir::cast<BinaryConditionalOperator>(op);
428 e->getCommon());
429 }
430
431 /// Build the opaque value mapping for an OpaqueValueExpr whose source
432 /// expression is set to the expression the OVE represents.
434 : cgf(cgf) {
435 if (ov) {
436 assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
437 "for OVE with no source expression");
438 data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
439 }
440 }
441
443 LValue lvalue)
444 : cgf(cgf),
445 data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}
446
448 RValue rvalue)
449 : cgf(cgf),
450 data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}
451
/// Explicitly release the mapping before destruction: unbind from the CGF
/// maps, then clear so the destructor (which checks isValid) does nothing.
void pop() {
  data.unbind(cgf);
  data.clear();
}
456
458 if (data.isValid())
459 data.unbind(cgf);
460 }
461 };
462
463private:
464 /// Declare a variable in the current scope, return success if the variable
465 /// wasn't declared yet.
466 void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
467 mlir::Location loc, clang::CharUnits alignment,
468 bool isParam = false);
469
470public:
471 mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);
472
473 void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);
474
475private:
476 // Track current variable initialization (if there's one)
477 const clang::VarDecl *currVarDecl = nullptr;
478 class VarDeclContext {
480 const clang::VarDecl *oldVal = nullptr;
481
482 public:
483 VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) {
484 if (p.currVarDecl)
485 oldVal = p.currVarDecl;
486 p.currVarDecl = value;
487 }
488
489 /// Can be used to restore the state early, before the dtor
490 /// is run.
491 void restore() { p.currVarDecl = oldVal; }
492 ~VarDeclContext() { restore(); }
493 };
494
495public:
496 /// Use to track source locations across nested visitor traversals.
497 /// Always use a `SourceLocRAIIObject` to change currSrcLoc.
498 std::optional<mlir::Location> currSrcLoc;
500 CIRGenFunction &cgf;
501 std::optional<mlir::Location> oldLoc;
502
503 public:
504 SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value) : cgf(cgf) {
505 if (cgf.currSrcLoc)
506 oldLoc = cgf.currSrcLoc;
507 cgf.currSrcLoc = value;
508 }
509
510 /// Can be used to restore the state early, before the dtor
511 /// is run.
512 void restore() { cgf.currSrcLoc = oldLoc; }
514 };
515
517 llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
518
519 /// Hold counters for incrementally naming temporaries
520 unsigned counterRefTmp = 0;
521 unsigned counterAggTmp = 0;
522 std::string getCounterRefTmpAsString();
523 std::string getCounterAggTmpAsString();
524
525 /// Helpers to convert Clang's SourceLocation to a MLIR Location.
526 mlir::Location getLoc(clang::SourceLocation srcLoc);
527 mlir::Location getLoc(clang::SourceRange srcLoc);
528 mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
529
530 const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
531
532 /// True if an insertion point is defined. If not, this indicates that the
533 /// current code being emitted is unreachable.
534 /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
535 /// since we don't yet force null insertion point to designate behavior (like
536 /// LLVM's codegen does) and we probably shouldn't.
537 bool haveInsertPoint() const {
538 return builder.getInsertionBlock() != nullptr;
539 }
540
541 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
542 // an ObjCMethodDecl.
544 llvm::PointerUnion<const clang::FunctionProtoType *,
545 const clang::ObjCMethodDecl *>
547
550 };
551
553
556 RValue emitAtomicLoad(LValue lvalue, SourceLocation loc, cir::MemOrder order,
557 bool isVolatile = false,
559
560 /// An abstract representation of regular/ObjC call/message targets.
562 /// The function declaration of the callee.
563 [[maybe_unused]] const clang::Decl *calleeDecl;
564
565 public:
566 AbstractCallee() : calleeDecl(nullptr) {}
567 AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {}
568
569 bool hasFunctionDecl() const {
570 return llvm::isa_and_nonnull<clang::FunctionDecl>(calleeDecl);
571 }
572
573 const clang::Decl *getDecl() const { return calleeDecl; }
574
575 unsigned getNumParams() const {
576 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
577 return fd->getNumParams();
578 return llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_size();
579 }
580
581 const clang::ParmVarDecl *getParamDecl(unsigned I) const {
582 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
583 return fd->getParamDecl(I);
584 return *(llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_begin() +
585 I);
586 }
587 };
588
589 struct VlaSizePair {
590 mlir::Value numElts;
592
593 VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
594 };
595
596 /// Return the number of elements for a single dimension
597 /// for the given array type.
598 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
599
600 /// Returns an MLIR::Value+QualType pair that corresponds to the size,
601 /// in non-variably-sized elements, of a variable length array type,
602 /// plus that largest non-variably-sized element type. Assumes that
603 /// the type has already been emitted with emitVariablyModifiedType.
604 VlaSizePair getVLASize(const VariableArrayType *type);
605 VlaSizePair getVLASize(QualType type);
606
608
609 mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType) {
610 return getAsNaturalAddressOf(addr, pointeeType).getBasePointer();
611 }
612
613 void finishFunction(SourceLocation endLoc);
614
615 /// Determine whether the given initializer is trivial in the sense
616 /// that it requires no code to be generated.
617 bool isTrivialInitializer(const Expr *init);
618
619 /// If the specified expression does not fold to a constant, or if it does but
620 /// contains a label, return false. If it constant folds return true and set
621 /// the boolean result in Result.
622 bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
623 bool allowLabels = false);
625 llvm::APSInt &resultInt,
626 bool allowLabels = false);
627
628 /// Return true if the statement contains a label in it. If
629 /// this statement is not executed normally, it not containing a label means
630 /// that we can just remove the code.
631 bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);
632
633 Address emitExtVectorElementLValue(LValue lv, mlir::Location loc);
634
635 class ConstantEmission {
636 // Cannot use mlir::TypedAttr directly here because of bit availability.
637 llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference;
638 ConstantEmission(mlir::TypedAttr c, bool isReference)
639 : valueAndIsReference(c, isReference) {}
640
641 public:
643 static ConstantEmission forReference(mlir::TypedAttr c) {
644 return ConstantEmission(c, true);
645 }
646 static ConstantEmission forValue(mlir::TypedAttr c) {
647 return ConstantEmission(c, false);
648 }
649
/// True when this emission actually holds a constant; an empty emission
/// (null attribute) converts to false.
explicit operator bool() const {
  return valueAndIsReference.getOpaqueValue() != nullptr;
}
653
654 bool isReference() const { return valueAndIsReference.getInt(); }
656 assert(isReference());
657 cgf.cgm.errorNYI(refExpr->getSourceRange(),
658 "ConstantEmission::getReferenceLValue");
659 return {};
660 }
661
662 mlir::TypedAttr getValue() const {
663 assert(!isReference());
664 return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer());
665 }
666 };
667
668 ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
669 ConstantEmission tryEmitAsConstant(const MemberExpr *me);
670
673 /// The address of the alloca for languages with explicit address space
674 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
675 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
676 /// as a global constant.
678
679 /// True if the variable is of aggregate type and has a constant
680 /// initializer.
682
683 /// True if the variable is a __block variable that is captured by an
684 /// escaping block.
685 bool isEscapingByRef = false;
686
687 /// True if the variable was emitted as an offload recipe, and thus doesn't
688 /// have the same sort of alloca initialization.
689 bool emittedAsOffload = false;
690
691 mlir::Value nrvoFlag{};
692
693 struct Invalid {};
695
698
700
/// True if the variable was emitted as a global constant rather than a
/// local alloca — in that case no valid local address was recorded.
bool wasEmittedAsGlobal() const { return !addr.isValid(); }
702
704
705 /// Returns the raw, allocated address, which is not necessarily
706 /// the address of the object itself. It is casted to default
707 /// address space for address space agnostic languages.
708 Address getAllocatedAddress() const { return addr; }
709
710 // Changes the stored address for the emission. This function should only
711 // be used in extreme cases, and isn't required to model normal AST
712 // initialization/variables.
714
715 /// Returns the address of the object within this declaration.
716 /// Note that this does not chase the forwarding pointer for
717 /// __block decls.
719 if (!isEscapingByRef)
720 return addr;
721
723 return Address::invalid();
724 }
725 };
726
727 /// IndirectBranch - The first time an indirect goto is seen we create a block
 728 /// reserved for the indirect branch. Unlike before, the actual 'indirectbr'
729 /// is emitted at the end of the function, once all block destinations have
730 /// been resolved.
731 mlir::Block *indirectGotoBlock = nullptr;
732
735
736 /// Perform the usual unary conversions on the specified expression and
737 /// compare the result against zero, returning an Int1Ty value.
738 mlir::Value evaluateExprAsBool(const clang::Expr *e);
739
740 cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d,
741 cir::GlobalOp gv,
742 cir::GetGlobalOp gvAddr);
743
744 /// Enter the cleanups necessary to complete the given phase of destruction
745 /// for a destructor. The end result should call destructors on members and
746 /// base classes in reverse order of their construction.
748
749 /// Determines whether an EH cleanup is required to destroy a type
750 /// with the given destruction kind.
751 /// TODO(cir): could be shared with Clang LLVM codegen
753 switch (kind) {
755 return false;
759 return getLangOpts().Exceptions;
761 return getLangOpts().Exceptions &&
762 cgm.getCodeGenOpts().ObjCAutoRefCountExceptions;
763 }
764 llvm_unreachable("bad destruction kind");
765 }
766
770
772
773 /// Set the address of a local variable.
775 assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
776 localDeclMap.insert({vd, addr});
777
778 // Add to the symbol table if not there already.
779 if (symbolTable.count(vd))
780 return;
781 symbolTable.insert(vd, addr.getPointer());
782 }
783
784 // Replaces the address of the local variable, if it exists. Else does the
785 // same thing as setAddrOfLocalVar.
787 localDeclMap.insert_or_assign(vd, addr);
788 }
789
790 // A class to allow reverting changes to a var-decl's registration to the
791 // localDeclMap. This is used in cases where things are being inserted into
792 // the variable list but don't follow normal lookup/search rules, like in
793 // OpenACC recipe generation.
795 CIRGenFunction &cgf;
796 const VarDecl *vd;
797 bool shouldDelete = false;
798 Address oldAddr = Address::invalid();
799
800 public:
802 : cgf(cgf), vd(vd) {
803 auto mapItr = cgf.localDeclMap.find(vd);
804
805 if (mapItr != cgf.localDeclMap.end())
806 oldAddr = mapItr->second;
807 else
808 shouldDelete = true;
809 }
810
812 if (shouldDelete)
813 cgf.localDeclMap.erase(vd);
814 else
815 cgf.localDeclMap.insert_or_assign(vd, oldAddr);
816 }
817 };
818
820
823
824 static bool
826
833
836
840 const clang::CXXRecordDecl *nearestVBase,
841 clang::CharUnits offsetFromNearestVBase,
842 bool baseIsNonVirtualPrimaryBase,
843 const clang::CXXRecordDecl *vtableClass,
844 VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
845 /// Return the Value of the vtable pointer member pointed to by thisAddr.
846 mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
847 const clang::CXXRecordDecl *vtableClass);
848
849 /// Returns whether we should perform a type checked load when loading a
850 /// virtual function for virtual calls to members of RD. This is generally
851 /// true when both vcall CFI and whole-program-vtables are enabled.
853
854 /// Source location information about the default argument or member
855 /// initializer expression we're evaluating, if any.
859
860 /// A scope within which we are constructing the fields of an object which
861 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
862 /// we need to evaluate the CXXDefaultInitExpr within the evaluation.
864 public:
866 : cgf(cgf), oldCXXDefaultInitExprThis(cgf.cxxDefaultInitExprThis) {
867 cgf.cxxDefaultInitExprThis = thisAddr;
868 }
870 cgf.cxxDefaultInitExprThis = oldCXXDefaultInitExprThis;
871 }
872
873 private:
874 CIRGenFunction &cgf;
875 Address oldCXXDefaultInitExprThis;
876 };
877
878 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
879 /// is overridden to be the object under construction.
881 public:
886 cgf.cxxThisValue = cgf.cxxDefaultInitExprThis.getPointer();
887 cgf.cxxThisAlignment = cgf.cxxDefaultInitExprThis.getAlignment();
888 }
890 cgf.cxxThisValue = oldCXXThisValue;
891 cgf.cxxThisAlignment = oldCXXThisAlignment;
892 }
893
894 public:
896 mlir::Value oldCXXThisValue;
899 };
900
905
906 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
907 /// current loop index is overridden. In order to encourage re-use of existing
908 /// array initialization, this uses a flag to determine if it is a 'no-op' or
909 /// not.
911 public:
912 ArrayInitLoopExprScope(CIRGenFunction &cgf, bool setIdx, mlir::Value index)
913 : cgf(cgf),
914 oldArrayInitIndex(setIdx
915 ? std::optional<mlir::Value>(cgf.arrayInitIndex)
916 : std::nullopt) {
917 if (setIdx)
918 cgf.arrayInitIndex = index;
919 }
921 if (oldArrayInitIndex.has_value())
922 cgf.arrayInitIndex = *oldArrayInitIndex;
923 }
924
925 private:
926 CIRGenFunction &cgf;
927 std::optional<mlir::Value> oldArrayInitIndex;
928 };
929
930 /// Get the index of the current ArrayInitLoopExpr, if any.
931 mlir::Value getArrayInitIndex() { return arrayInitIndex; }
932
934 LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
935
936 /// Construct an address with the natural alignment of T. If a pointer to T
937 /// is expected to be signed, the pointer passed to this function must have
938 /// been signed, and the returned Address will have the pointer authentication
939 /// information needed to authenticate the signed pointer.
941 CharUnits alignment,
942 bool forPointeeType = false,
943 LValueBaseInfo *baseInfo = nullptr) {
944 if (alignment.isZero())
945 alignment = cgm.getNaturalTypeAlignment(t, baseInfo);
946 return Address(ptr, convertTypeForMem(t), alignment);
947 }
948
950 Address value, const CXXRecordDecl *derived,
951 llvm::iterator_range<CastExpr::path_const_iterator> path,
952 bool nullCheckValue, SourceLocation loc);
953
955 mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived,
956 llvm::iterator_range<CastExpr::path_const_iterator> path,
957 bool nullCheckValue);
958
959 /// Return the VTT parameter that should be passed to a base
960 /// constructor/destructor with virtual bases.
961 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
962 /// to ItaniumCXXABI.cpp together with all the references to VTT.
963 mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase,
964 bool delegating);
965
968 return makeAddrLValue(addr, ty, LValueBaseInfo(source));
969 }
970
972 return LValue::makeAddr(addr, ty, baseInfo);
973 }
974
975 void initializeVTablePointers(mlir::Location loc,
976 const clang::CXXRecordDecl *rd);
977 void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
978
980
981 /// Return the address of a local variable.
983 auto it = localDeclMap.find(vd);
984 assert(it != localDeclMap.end() &&
985 "Invalid argument to getAddrOfLocalVar(), no decl!");
986 return it->second;
987 }
988
990 mlir::Type fieldType, unsigned index);
991
992 /// Given an opaque value expression, return its LValue mapping if it exists,
993 /// otherwise create one.
995
996 /// Given an opaque value expression, return its RValue mapping if it exists,
997 /// otherwise create one.
999
1000 /// Load the value for 'this'. This function is only valid while generating
1001 /// code for an C++ member function.
1002 /// FIXME(cir): this should return a mlir::Value!
/// Returns the current 'this' value; asserts that one has been established
/// (i.e. we are emitting a C++ member function body).
mlir::Value loadCXXThis() {
  assert(cxxThisValue && "no 'this' value for this function");
  return cxxThisValue;
}
1008
 1009 /// Load the VTT parameter passed to base constructors/destructors that have
 1010 /// virtual bases. FIXME: Every place that calls LoadCXXVTT is something that
 1011 /// needs to be abstracted properly.
1012 mlir::Value loadCXXVTT() {
1013 assert(cxxStructorImplicitParamValue && "no VTT value for this function");
1015 }
1016
1017 /// Convert the given pointer to a complete class to the given direct base.
1019 Address value,
1020 const CXXRecordDecl *derived,
1021 const CXXRecordDecl *base,
1022 bool baseIsVirtual);
1023
1024 /// Determine whether a return value slot may overlap some other object.
1026 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
1027 // class subobjects. These cases may need to be revisited depending on the
1028 // resolution of the relevant core issue.
1030 }
1031
1032 /// Determine whether a base class initialization may overlap some other
1033 /// object.
1035 const CXXRecordDecl *baseRD,
1036 bool isVirtual);
1037
1038 /// Get an appropriate 'undef' rvalue for the given type.
1039 /// TODO: What's the equivalent for MLIR? Currently we're only using this for
1040 /// void types so it just returns RValue::get(nullptr) but it'll need
1041 /// addressed later.
1043
1044 cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
1045 cir::FuncType funcType);
1046
1048 FunctionArgList &args);
1049
1050 /// Emit the function prologue: declare function arguments in the symbol
1051 /// table.
1052 void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB,
1053 const FunctionDecl *fd, SourceLocation bodyBeginLoc);
1054
1055 /// Emit code for the start of a function.
1056 /// \param loc The location to be associated with the function.
1057 /// \param startLoc The location of the function body.
1059 cir::FuncOp fn, cir::FuncType funcType,
1061 clang::SourceLocation startLoc);
1062
1063 /// returns true if aggregate type has a volatile member.
1065 if (const auto *rd = t->getAsRecordDecl())
1066 return rd->hasVolatileMember();
1067 return false;
1068 }
1069
1070 void addCatchHandlerAttr(const CXXCatchStmt *catchStmt,
1071 SmallVector<mlir::Attribute> &handlerAttrs);
1072
1073 /// The cleanup depth enclosing all the cleanups associated with the
1074 /// parameters.
1076
1078
1079 /// Takes the old cleanup stack size and emits the cleanup blocks
1080 /// that have been added.
1081 void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth,
1082 ArrayRef<mlir::Value *> valuesToReload = {});
1083
1084 /// Pops cleanup blocks until the given savepoint is reached, then adds the
1085 /// cleanups from the given savepoint in the lifetime-extended cleanups stack.
1086 void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth,
1087 size_t oldLifetimeExtendedSize,
1088 ArrayRef<mlir::Value *> valuesToReload = {});
1089 void popCleanupBlock(bool forDeactivation = false);
1090
1091 void terminateStructuredRegionBody(mlir::Region &r, mlir::Location loc);
1092
1093 /// Deactivates the given cleanup block. The block cannot be reactivated. Pops
1094 /// it if it's the top of the stack.
1095 ///
 /// \param dominatingIP - An instruction which is known to
 /// dominate the current IP (if set) and which lies along
 /// all paths of execution between the current IP and the
 /// point at which the cleanup comes into scope.
1100 void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup,
1101 mlir::Operation *dominatingIP);
1102
1103 /// Create an active flag variable for use with conditional cleanups. The
1104 /// flag is initialized to false before the outermost conditional and set to
1105 /// true at the current insertion point (inside the conditional branch).
1106 Address createCleanupActiveFlag();
1107
1108 /// Set up the last cleanup that was pushed as a conditional
1109 /// full-expression cleanup.
1110 void initFullExprCleanup();
1111 void initFullExprCleanupWithFlag(Address activeFlag);
1112
1113 /// Promote a single pending cleanup entry onto the EH scope stack. If the
1114 /// entry has a valid activeFlag, the cleanup is configured as conditional.
1115 /// Defined in CIRGenDecl.cpp where the concrete cleanup types are visible.
1116 void pushPendingCleanupToEHStack(const PendingCleanupEntry &entry);
1117
1118 /// Push a cleanup to be run at the end of the current full-expression. Safe
1119 /// against the possibility that we're currently inside a
1120 /// conditionally-evaluated expression.
1121 template <class T, class... As>
1123 if (!isInConditionalBranch())
1124 return ehStack.pushCleanup<T>(kind, a...);
1125
1126 // Defer the cleanup until the FullExprCleanupScope exits. We can't push
1127 // to the EH stack now because the ternary's inner LexicalScope would pop
1128 // it prematurely.
1129 Address activeFlag = createCleanupActiveFlag();
1131 PendingCleanupEntry{kind, a..., activeFlag});
1132 }
1133
1134 /// Push a cleanup and record it for deferred deactivation. The cleanup will
1135 /// be deactivated when the enclosing CleanupDeactivationScope exits.
1136 template <class T, class... As>
1138 mlir::Location loc = builder.getUnknownLoc();
1139 mlir::Operation *dominatingIP = builder.getBool(false, loc).getOperation();
1140 ehStack.pushCleanup<T>(kind, a...);
1142 {ehStack.stable_begin(), dominatingIP});
1143 }
1144
1146 Address addr, QualType type);
1148 QualType type, Destroyer *destroyer,
1149 bool useEHCleanupForArray);
1150
1151 /// Queue a cleanup to be pushed after finishing the current full-expression.
1152 /// When the enclosing RunCleanupsScope exits, popCleanupBlocks promotes these
1153 /// entries onto the EH scope stack for the enclosing scope.
1155 Destroyer *destroyer) {
1156 lifetimeExtendedCleanupStack.push_back({kind, addr, type, destroyer});
1157 }
1158
1159 /// Enters a new scope for capturing cleanups, all of which
1160 /// will be executed once the scope is exited.
1161 class RunCleanupsScope {
1162 EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
1163 size_t lifetimeExtendedCleanupStackSize;
1164 CleanupDeactivationScope deactivateCleanups;
1165
1166 protected:
1169
1170 private:
1171 RunCleanupsScope(const RunCleanupsScope &) = delete;
1172 void operator=(const RunCleanupsScope &) = delete;
1173
1174 protected:
1176
1177 public:
1178 /// Enter a new cleanup scope.
1180 : deactivateCleanups(cgf), performCleanup(true), cgf(cgf) {
1181 cleanupStackDepth = cgf.ehStack.stable_begin();
1182 lifetimeExtendedCleanupStackSize =
1183 cgf.lifetimeExtendedCleanupStack.size();
1184 oldDidCallStackSave = cgf.didCallStackSave;
1185 cgf.didCallStackSave = false;
1186 oldCleanupStackDepth = cgf.currentCleanupStackDepth;
1187 cgf.currentCleanupStackDepth = cleanupStackDepth;
1188 }
1189
1190 /// Exit this cleanup scope, emitting any accumulated cleanups.
1192 if (performCleanup)
1193 forceCleanup();
1194 }
1195
1196 /// Force the emission of cleanups now, instead of waiting
1197 /// until this object is destroyed.
1198 void forceCleanup(ArrayRef<mlir::Value *> valuesToReload = {}) {
1199 assert(performCleanup && "Already forced cleanup");
1201 deactivateCleanups.forceDeactivate();
1202 cgf.popCleanupBlocks(cleanupStackDepth, lifetimeExtendedCleanupStackSize,
1203 valuesToReload);
1204 performCleanup = false;
1205 cgf.currentCleanupStackDepth = oldCleanupStackDepth;
1206 }
1207
1208 /// Whether there are any pending cleanups that have been pushed since
1209 /// this scope was entered.
1210 bool hasPendingCleanups() const {
1211 return cgf.ehStack.stable_begin() != cleanupStackDepth;
1212 }
1213 };
1214
1215 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1217
1219 CIRGenFunction &cgf;
1220 RunCleanupsScope cleanups;
1221 cir::CleanupScopeOp scope;
1222 size_t deferredCleanupStackSize;
1223 bool exited = false;
1224
1225 public:
1226 FullExprCleanupScope(CIRGenFunction &cgf, const Expr *subExpr);
1227
1228 void exit(ArrayRef<mlir::Value *> valuesToReload = {});
1229
1231 if (!exited)
1232 exit();
1233 }
1234
1235 private:
1237 void operator=(const FullExprCleanupScope &) = delete;
1238 };
1239
1240public:
1241 /// Represents a scope, including function bodies, compound statements, and
1242 /// the substatements of if/while/do/for/switch/try statements. This class
1243 /// handles any automatic cleanup, along with the return value.
1244 struct LexicalScope : public RunCleanupsScope {
1245 private:
1246 // Points to the scope entry block. This is useful, for instance, for
1247 // helping to insert allocas before finalizing any recursive CodeGen from
1248 // switches.
1249 mlir::Block *entryBlock;
1250
1251 LexicalScope *parentScope = nullptr;
1252
1253 // Holds the actual value for ScopeKind::Try
1254 cir::TryOp tryOp = nullptr;
1255
1256 // On a coroutine body, the OnFallthrough sub stmt holds the handler
1257 // (CoreturnStmt) for control flow falling off the body. Keep track
1258 // of emitted co_return in this scope and allow OnFallthrough to be
    // skipped.
1260 bool hasCoreturnStmt = false;
1261
1262 // Only Regular is used at the moment. Support for other kinds will be
1263 // added as the relevant statements/expressions are upstreamed.
1264 enum Kind {
1265 Regular, // cir.if, cir.scope, if_regions
1266 Ternary, // cir.ternary
1267 Switch, // cir.switch
1268 Try, // cir.try
1269 GlobalInit // cir.global initialization code
1270 };
1271 Kind scopeKind = Kind::Regular;
1272
1273 // The scope return value.
1274 mlir::Value retVal = nullptr;
1275
1276 mlir::Location beginLoc;
1277 mlir::Location endLoc;
1278
1279 public:
1280 unsigned depth = 0;
1281
    /// Enter a new lexical scope rooted at entry block \p eb and make it the
    /// current scope of \p cgf; the previous scope is captured as the parent
    /// so restore() can re-establish it.
    LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
        : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
          beginLoc(loc), endLoc(loc) {

      assert(entryBlock && "LexicalScope requires an entry block");
      cgf.curLexScope = this;
      // NOTE(review): depth only ever becomes 1 for any nested scope;
      // presumably parentScope->depth + 1 was intended — confirm how depth
      // is consumed before relying on it as a true nesting level.
      if (parentScope)
        ++depth;

      // A fused location is used to carry the scope's begin/end source
      // positions as a two-element pair; split it so each end of the scope
      // gets its own location.
      if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
        assert(fusedLoc.getLocations().size() == 2 && "too many locations");
        beginLoc = fusedLoc.getLocations()[0];
        endLoc = fusedLoc.getLocations()[1];
      }
    }
1297
    // Record the value this scope should yield as its result.
    void setRetVal(mlir::Value v) { retVal = v; }

    void cleanup();
    // Pop this scope: make the parent scope current again.
    void restore() { cgf.curLexScope = parentScope; }
1302
1305 cleanup();
1306 restore();
1307 }
1308
    // ---
    // Coroutine tracking
    // ---
    // Whether a co_return has been emitted in this scope; used to skip the
    // coroutine body's fallthrough handler.
    bool hasCoreturn() const { return hasCoreturnStmt; }
    void setCoreturn() { hasCoreturnStmt = true; }

    // ---
    // Kind
    // ---
    bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
    bool isRegular() { return scopeKind == Kind::Regular; }
    bool isSwitch() { return scopeKind == Kind::Switch; }
    bool isTernary() { return scopeKind == Kind::Ternary; }
    bool isTry() { return scopeKind == Kind::Try; }
    cir::TryOp getClosestTryParent();
    void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
    void setAsSwitch() { scopeKind = Kind::Switch; }
    void setAsTernary() { scopeKind = Kind::Ternary; }
    // A Try scope also records its cir.try op so it can be retrieved later.
    void setAsTry(cir::TryOp op) {
      scopeKind = Kind::Try;
      tryOp = op;
    }

    // Valid only on Try scopes; returns the op recorded by setAsTry.
    cir::TryOp getTry() {
      assert(isTry());
      return tryOp;
    }
1336
1337 // ---
1338 // Return handling.
1339 // ---
1340
1341 private:
1342 // On switches we need one return block per region, since cases don't
1343 // have their own scopes but are distinct regions nonetheless.
1344
1345 // TODO: This implementation should change once we have support for early
1346 // exits in MLIR structured control flow (llvm-project#161575)
1348 llvm::DenseMap<mlir::Block *, mlir::Location> retLocs;
1349 llvm::DenseMap<cir::CaseOp, unsigned> retBlockInCaseIndex;
1350 std::optional<unsigned> normalRetBlockIndex;
1351
    // There's usually only one ret block per scope, but this needs to be
    // get or create because of potential unreachable return statements; note
    // that for those, all source locations map to the first one found.
    mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
      // Multiple return blocks are only expected while emitting inside a
      // cir.case region (switches); anywhere else a second request would
      // indicate a bookkeeping bug.
      assert((isa_and_nonnull<cir::CaseOp>(
                  cgf.builder.getBlock()->getParentOp()) ||
              retBlocks.size() == 0) &&
             "only switches can hold more than one ret block");

      // Create the return block but don't hook it up just yet. The guard
      // restores the caller's insertion point, since createBlock moves it
      // into the newly created block.
      mlir::OpBuilder::InsertionGuard guard(cgf.builder);
      auto *b = cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
      retBlocks.push_back(b);
      updateRetLoc(b, loc);
      return b;
    }
1368
1369 cir::ReturnOp emitReturn(mlir::Location loc);
1370 void emitImplicitReturn();
1371
1372 public:
    // Location recorded for return block \p b; the block must already have
    // been registered via updateRetLoc.
    mlir::Location getRetLoc(mlir::Block *b) { return retLocs.at(b); }
    // Record (or overwrite) the source location associated with \p b.
    void updateRetLoc(mlir::Block *b, mlir::Location loc) {
      retLocs.insert_or_assign(b, loc);
    }
1378
1379 mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1380 // Check if we're inside a case region
1381 if (auto caseOp = mlir::dyn_cast_if_present<cir::CaseOp>(
1382 cgf.builder.getBlock()->getParentOp())) {
1383 auto iter = retBlockInCaseIndex.find(caseOp);
1384 if (iter != retBlockInCaseIndex.end()) {
1385 // Reuse existing return block
1386 mlir::Block *ret = retBlocks[iter->second];
1387 updateRetLoc(ret, loc);
1388 return ret;
1389 }
1390 // Create new return block
1391 mlir::Block *ret = createRetBlock(cgf, loc);
1392 retBlockInCaseIndex[caseOp] = retBlocks.size() - 1;
1393 return ret;
1394 }
1395
1396 if (normalRetBlockIndex) {
1397 mlir::Block *ret = retBlocks[*normalRetBlockIndex];
1398 updateRetLoc(ret, loc);
1399 return ret;
1400 }
1401
1402 mlir::Block *ret = createRetBlock(cgf, loc);
1403 normalRetBlockIndex = retBlocks.size() - 1;
1404 return ret;
1405 }
1406
1407 mlir::Block *getEntryBlock() { return entryBlock; }
1408 };
1409
1411
1413
1415 QualType type);
1416
1417 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
1418 QualType type);
1419
1421 Destroyer *destroyer);
1422
1424 QualType type, Destroyer *destroyer,
1425 bool useEHCleanupForArray);
1426
1428
1429 void pushIrregularPartialArrayCleanup(mlir::Value arrayBegin,
1430 Address arrayEndPointer,
1431 QualType elementType,
1432 CharUnits elementAlign,
1433 Destroyer *destroyer);
1434
1435 /// Start generating a thunk function.
1436 void startThunk(cir::FuncOp fn, GlobalDecl gd,
1437 const CIRGenFunctionInfo &fnInfo, bool isUnprototyped);
1438
1439 /// Finish generating a thunk function.
1440 void finishThunk();
1441
1442 /// Generate code for a thunk function.
1443 void generateThunk(cir::FuncOp fn, const CIRGenFunctionInfo &fnInfo,
1444 GlobalDecl gd, const ThunkInfo &thunk,
1445 bool isUnprototyped);
1446
1447 /// ----------------------
1448 /// CIR emit functions
1449 /// ----------------------
1450public:
1451 bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr,
1453 clang::SVETypeFlags typeFlags);
1454 mlir::Value emitSVEPredicateCast(mlir::Value pred, unsigned minNumElts,
1455 mlir::Location loc);
1456 std::optional<mlir::Value>
1457 emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
1459 llvm::Triple::ArchType arch);
1460 std::optional<mlir::Value> emitAArch64SMEBuiltinExpr(unsigned builtinID,
1461 const CallExpr *expr);
1462 std::optional<mlir::Value> emitAArch64SVEBuiltinExpr(unsigned builtinID,
1463 const CallExpr *expr);
1464
1465 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
1466 SourceLocation loc,
1467 SourceLocation assumptionLoc,
1468 int64_t alignment,
1469 mlir::Value offsetValue = nullptr);
1470
1471 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
1472 SourceLocation assumptionLoc,
1473 int64_t alignment,
1474 mlir::Value offsetValue = nullptr);
1475
1476private:
1477 void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
1478 clang::CharUnits alignment);
1479
1480 CIRGenCallee emitDirectCallee(const GlobalDecl &gd);
1481
1482public:
1484 llvm::StringRef fieldName,
1485 unsigned fieldIndex);
1486
1487 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1488 mlir::Location loc, clang::CharUnits alignment,
1489 bool insertIntoFnEntryBlock,
1490 mlir::Value arraySize = nullptr);
1491 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1492 mlir::Location loc, clang::CharUnits alignment,
1493 mlir::OpBuilder::InsertPoint ip,
1494 mlir::Value arraySize = nullptr);
1495
1496 void emitAggregateStore(mlir::Value value, Address dest);
1497
1498 void emitAggExpr(const clang::Expr *e, AggValueSlot slot);
1499
1501
1503
1504 /// Emit an aggregate copy.
1505 ///
1506 /// \param isVolatile \c true iff either the source or the destination is
1507 /// volatile.
1508 /// \param MayOverlap Whether the tail padding of the destination might be
1509 /// occupied by some other object. More efficient code can often be
1510 /// generated if not.
1511 void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
1512 AggValueSlot::Overlap_t mayOverlap,
1513 bool isVolatile = false);
1514
1515 /// Emit code to compute the specified expression which can have any type. The
1516 /// result is returned as an RValue struct. If this is an aggregate
1517 /// expression, the aggloc/agglocvolatile arguments indicate where the result
1518 /// should be returned.
1521 bool ignoreResult = false);
1522
1523 /// Emits the code necessary to evaluate an arbitrary expression into the
1524 /// given memory location.
1525 void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals,
1526 bool isInitializer);
1527
1528 /// Similarly to emitAnyExpr(), however, the result will always be accessible
1529 /// even if no aggregate location is provided.
1531
1532 void emitAnyExprToExn(const Expr *e, Address addr);
1533
1534 void emitArrayDestroy(mlir::Value begin, mlir::Value numElements,
1535 QualType elementType, CharUnits elementAlign,
1536 Destroyer *destroyer);
1537
1538 mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
1539 QualType &baseType, Address &addr);
1542
1544
1546 LValueBaseInfo *baseInfo = nullptr);
1547
1548 std::pair<mlir::Value, mlir::Type>
1550 QualType inputType, std::string &constraintString,
1551 SourceLocation loc);
1552 std::pair<mlir::Value, mlir::Type>
1553 emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr,
1554 std::string &constraintString);
1555 mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
1556
1558 void emitAtomicInit(Expr *init, LValue dest);
1559 void emitAtomicStore(RValue rvalue, LValue dest, bool isInit);
1560 void emitAtomicStore(RValue rvalue, LValue dest, cir::MemOrder order,
1561 bool isVolatile, bool isInit);
1563 const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
1564 llvm::function_ref<void(cir::MemOrder)> emitAtomicOp);
1565
1566 mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s);
1567
1568 AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
1569 mlir::OpBuilder::InsertPoint ip = {});
1570
1572
1573 /// Emit code and set up symbol table for a variable declaration with auto,
1574 /// register, or no storage class specifier. These turn into simple stack
1575 /// objects, globals depending on target.
1576 void emitAutoVarDecl(const clang::VarDecl &d);
1577
1578 void emitAutoVarCleanups(const AutoVarEmission &emission);
1579 /// Emit the initializer for an allocated variable. If this call is not
1580 /// associated with the call to emitAutoVarAlloca (as the address of the
1581 /// emission is not directly an alloca), the allocatedSeparately parameter can
1582 /// be used to suppress the assertions. However, this should only be used in
1583 /// extreme cases, as it doesn't properly reflect the language/AST.
1584 void emitAutoVarInit(const AutoVarEmission &emission);
1585 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1587
1588 void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
1589
1590 void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
1591 CXXCtorInitializer *baseInit);
1592
1594
1595 mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
1596
1597 RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
1598 const clang::CallExpr *e, ReturnValueSlot returnValue);
1599
1600 /// Returns a Value corresponding to the size of the given expression by
1601 /// emitting a `cir.objsize` operation.
1602 ///
1603 /// \param e The expression whose object size to compute
1604 /// \param type Determines the semantics of the object size computation.
1605 /// The type parameter is a 2-bit value where:
1606 /// bit 0 (type & 1): 0 = whole object, 1 = closest subobject
1607 /// bit 1 (type & 2): 0 = maximum size, 2 = minimum size
1608 /// \param resType The result type for the size value
1609 /// \param emittedE Optional pre-emitted pointer value. If non-null, we'll
1610 /// call `cir.objsize` on this value rather than emitting e.
1611 /// \param isDynamic If true, allows runtime evaluation via dynamic mode
1612 mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type,
1613 cir::IntType resType, mlir::Value emittedE,
1614 bool isDynamic);
1615
1616 mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e,
1617 unsigned type,
1618 cir::IntType resType,
1619 mlir::Value emittedE,
1620 bool isDynamic);
1621
1622 int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts);
1623
1625
1626 RValue emitCall(const CIRGenFunctionInfo &funcInfo,
1627 const CIRGenCallee &callee, ReturnValueSlot returnValue,
1628 const CallArgList &args, cir::CIRCallOpInterface *callOp,
1629 mlir::Location loc);
1632 const CallArgList &args,
1633 cir::CIRCallOpInterface *callOrTryCall = nullptr) {
1634 assert(currSrcLoc && "source location must have been set");
1635 return emitCall(funcInfo, callee, returnValue, args, callOrTryCall,
1636 *currSrcLoc);
1637 }
1638
1639 RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
1641
1642 /// Emit the call and return for a thunk function.
1643 void emitCallAndReturnForThunk(cir::FuncOp callee, const ThunkInfo *thunk,
1644 bool isUnprototyped);
1645
1646 void emitCallArg(CallArgList &args, const clang::Expr *e,
1647 clang::QualType argType);
1648 void emitCallArgs(
1649 CallArgList &args, PrototypeWrapper prototype,
1650 llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
1651 AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
1656
1657 template <typename T>
1658 mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
1659 mlir::ArrayAttr value,
1660 cir::CaseOpKind kind,
1661 bool buildingTopLevelCase);
1662
1664
1665 mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
1666 mlir::Type condType,
1667 bool buildingTopLevelCase);
1668
1669 LValue emitCastLValue(const CastExpr *e);
1670
1671 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
1672 /// sanitizer is enabled, a runtime check is also emitted.
1673 mlir::Value emitCheckedArgForAssume(const Expr *e);
1674
1675 /// Emit a conversion from the specified complex type to the specified
1676 /// destination type, where the destination type is an LLVM scalar type.
1677 mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
1678 QualType dstTy, SourceLocation loc);
1679
1682
1684
1685 mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
1686 cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1687 cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1688 cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
1689 cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
1690 mlir::Value coroframeAddr);
1691
1692 cir::CallOp emitCoroFreeBuiltin(const CallExpr *e);
1694
1695 void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
1696
1698
1699 mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
1700
1701 mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s);
1702
1704 AggValueSlot dest);
1705
1708 Address arrayBegin, const CXXConstructExpr *e,
1709 bool newPointerIsChecked,
1710 bool zeroInitialize = false);
1712 mlir::Value numElements, Address arrayBase,
1713 const CXXConstructExpr *e,
1714 bool newPointerIsChecked,
1715 bool zeroInitialize);
1717 clang::CXXCtorType type, bool forVirtualBase,
1718 bool delegating, AggValueSlot thisAVS,
1719 const clang::CXXConstructExpr *e);
1720
1722 clang::CXXCtorType type, bool forVirtualBase,
1723 bool delegating, Address thisAddr,
1725
1727 bool forVirtualBase, Address thisAddr,
1728 bool inheritedFromVBase,
1729 const CXXInheritedCtorInitExpr *e);
1730
1732 SourceLocation loc, const CXXConstructorDecl *d, CXXCtorType ctorType,
1733 bool forVirtualBase, bool delegating, CallArgList &args);
1734
1735 void emitCXXDeleteExpr(const CXXDeleteExpr *e);
1736
1738 bool forVirtualBase, bool delegating,
1739 Address thisAddr, QualType thisTy);
1740
1742 mlir::Value thisVal, QualType thisTy,
1743 mlir::Value implicitParam,
1744 QualType implicitParamTy, const CallExpr *e);
1745
1746 mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
1748
1751
1753 const Expr *e, Address base, mlir::Value memberPtr,
1754 const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo);
1755
1757 const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
1758 ReturnValueSlot returnValue, mlir::Value thisPtr,
1759 mlir::Value implicitParam, clang::QualType implicitParamTy,
1760 const clang::CallExpr *ce, CallArgList *rtlArgs);
1761
1763 const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
1764 ReturnValueSlot returnValue, bool hasQualifier,
1765 clang::NestedNameSpecifier qualifier, bool isArrow,
1766 const clang::Expr *base);
1767
1770
1771 mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
1772
1773 void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
1774 mlir::Type elementTy, Address beginPtr,
1775 mlir::Value numElements,
1776 mlir::Value allocSizeWithoutCookie);
1777
1778 /// Create a check for a function parameter that may potentially be
1779 /// declared as non-null.
1780 void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc,
1781 AbstractCallee ac, unsigned paramNum);
1782
1784 const CXXMethodDecl *md,
1786
1789
1791
1793 const CallExpr *callExpr,
1795
1796 void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType,
1797 Address ptr);
1798
1799 void emitCXXThrowExpr(const CXXThrowExpr *e);
1800
1802 virtual mlir::LogicalResult operator()(CIRGenFunction &cgf) = 0;
1803 virtual ~cxxTryBodyEmitter() = default;
1804 };
1805
1806 void emitBeginCatch(const CXXCatchStmt *catchStmt, mlir::Value ehToken);
1807
1808 mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s,
1809 cxxTryBodyEmitter &bodyCallback);
1810 mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
1811
1813 clang::CXXCtorType ctorType, FunctionArgList &args);
1814
1815 // It's important not to confuse this and emitDelegateCXXConstructorCall.
1816 // Delegating constructors are the C++11 feature. The constructor delegate
1817 // optimization is used to reduce duplication in the base and complete
1818 // constructors where they are substantially the same.
1820 const FunctionArgList &args);
1821
1822 void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr,
1823 QualType deleteTy);
1824
1825 mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
1826
1827 mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e);
1828 mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
1829
1830 /// Emit an expression as an initializer for an object (variable, field, etc.)
1831 /// at the given location. The expression is not necessarily the normal
1832 /// initializer for the object, and the address is not necessarily
1833 /// its normal location.
1834 ///
1835 /// \param init the initializing expression
1836 /// \param d the object to act as if we're initializing
1837 /// \param lvalue the lvalue to initialize
1838 /// \param capturedByInit true if \p d is a __block variable whose address is
1839 /// potentially changed by the initializer
1840 void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
1841 LValue lvalue, bool capturedByInit = false);
1842
1843 mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
1844
1845 mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
1846
1847 mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s);
1848
1850
1852 clang::Expr *init);
1853
1855
1856 mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);
1857
1858 mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
1859
1860 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
1861
1862 void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty);
1863
1864 mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee,
1866 mlir::NamedAttrList attrs = {});
1867
1868 void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc);
1869
1870 /// Emit the computation of the specified expression of scalar type.
1871 mlir::Value emitScalarExpr(const clang::Expr *e,
1872 bool ignoreResultAssign = false);
1873
1874 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv);
1875
1876 /// Build a debug stoppoint if we are emitting debug info.
1877 void emitStopPoint(const Stmt *s);
1878
1879 // Build CIR for a statement. useCurrentScope should be true if no
1880 // new scopes need be created when finding a compound statement.
1881 mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope,
1882 llvm::ArrayRef<const Attr *> attrs = {});
1883
1884 mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
1885 bool useCurrentScope);
1886
1887 mlir::LogicalResult emitForStmt(const clang::ForStmt &s);
1888
1889 void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator,
1890 CallArgList &callArgs);
1891
1892 RValue emitCoawaitExpr(const CoawaitExpr &e,
1893 AggValueSlot aggSlot = AggValueSlot::ignored(),
1894 bool ignoreResult = false);
1895
1896 RValue emitCoyieldExpr(const CoyieldExpr &e,
1897 AggValueSlot aggSlot = AggValueSlot::ignored(),
1898 bool ignoreResult = false);
1899 /// Emit the computation of the specified expression of complex type,
1900 /// returning the result.
1901 mlir::Value emitComplexExpr(const Expr *e);
1902
1903 void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit);
1904
1905 mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv);
1906
1907 LValue emitComplexAssignmentLValue(const BinaryOperator *e);
1908 LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
1909 LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e,
1910 mlir::Value &result);
1911
1912 mlir::LogicalResult
1913 emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
1914 AggValueSlot slot = AggValueSlot::ignored());
1915
1916 mlir::LogicalResult
1918 Address *lastValue = nullptr,
1919 AggValueSlot slot = AggValueSlot::ignored());
1920
1921 void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
1922 mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
1923 LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
1924
1925 mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
1926 mlir::Type condType,
1927 bool buildingTopLevelCase);
1928
1930 clang::CXXCtorType ctorType,
1931 const FunctionArgList &args,
1933
1934 /// We are performing a delegate call; that is, the current function is
1935 /// delegating to another one. Produce a r-value suitable for passing the
1936 /// given parameter.
1937 void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
1939
1940 /// Emit an `if` on a boolean condition to the specified blocks.
1941 /// FIXME: Based on the condition, this might try to simplify the codegen of
1942 /// the conditional based on the branch.
1943 /// In the future, we may apply code generation simplifications here,
1944 /// similar to those used in classic LLVM codegen
1945 /// See `EmitBranchOnBoolExpr` for inspiration.
1946 mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
1947 const clang::Stmt *thenS,
1948 const clang::Stmt *elseS);
1949 cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
1950 BuilderCallbackRef thenBuilder,
1951 mlir::Location thenLoc,
1952 BuilderCallbackRef elseBuilder,
1953 std::optional<mlir::Location> elseLoc = {});
1954
1955 mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
1956
1957 LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e);
1958
1959 mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
1960 mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
1961
1962 void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md);
1963 void emitLambdaStaticInvokeBody(const CXXMethodDecl *md);
1964
1965 mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
1966
1967 /// Emit code to compute the specified expression,
1968 /// ignoring the result.
1969 void emitIgnoredExpr(const clang::Expr *e);
1970
1971 RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc);
1972
1973 /// Load a complex number from the specified l-value.
1974 mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc);
1975
1976 RValue emitLoadOfExtVectorElementLValue(LValue lv);
1977
1978 /// Given an expression that represents a value lvalue, this method emits
1979 /// the address of the lvalue, then loads the result as an rvalue,
1980 /// returning the rvalue.
1981 RValue emitLoadOfLValue(LValue lv, SourceLocation loc);
1982
1983 Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
1984 LValueBaseInfo *pointeeBaseInfo);
1985 LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
1986 QualType refTy, AlignmentSource source);
1987
1988 /// EmitLoadOfScalar - Load a scalar value from an address, taking
1989 /// care to appropriately convert from the memory representation to
1990 /// the LLVM value representation. The l-value must be a simple
1991 /// l-value.
1992 mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);
1993 mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, QualType ty,
1994 SourceLocation loc, LValueBaseInfo baseInfo);
1995
1996 /// Emit code to compute a designator that specifies the location
1997 /// of the expression.
1998 /// FIXME: document this function better.
1999 LValue emitLValue(const clang::Expr *e);
2000 LValue emitLValueForBitField(LValue base, const FieldDecl *field);
2001 LValue emitLValueForField(LValue base, const clang::FieldDecl *field);
2002
2003 LValue emitLValueForLambdaField(const FieldDecl *field);
2004 LValue emitLValueForLambdaField(const FieldDecl *field,
2005 mlir::Value thisValue);
2006
 /// Like emitLValueForField, except that if the field is a reference, this
2008 /// will return the address of the reference and not the address of the value
2009 /// stored in the reference.
2010 LValue emitLValueForFieldInitialization(LValue base,
2011 const clang::FieldDecl *field,
2012 llvm::StringRef fieldName);
2013
2014 LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
2015
2016 LValue emitMemberExpr(const MemberExpr *e);
2017
2018 /// Emit a musttail call for a thunk with a potentially different ABI.
2019 void emitMustTailThunk(GlobalDecl gd, mlir::Value adjustedThisPtr,
2020 cir::FuncOp callee);
2021
2022 /// Emit a call to an AMDGPU builtin function.
2023 std::optional<mlir::Value> emitAMDGPUBuiltinExpr(unsigned builtinID,
2024 const CallExpr *expr);
2025
2026 /// Emit a call to an NVPTX builtin function.
2027 std::optional<mlir::Value> emitNVPTXBuiltinExpr(unsigned builtinID,
2028 const CallExpr *expr);
2029
2030 LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
2031
2032 LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
2033
2034 /// Given an expression with a pointer type, emit the value and compute our
2035 /// best estimate of the alignment of the pointee.
2036 ///
2037 /// One reasonable way to use this information is when there's a language
2038 /// guarantee that the pointer must be aligned to some stricter value, and
2039 /// we're simply trying to ensure that sufficiently obvious uses of under-
2040 /// aligned objects don't get miscompiled; for example, a placement new
2041 /// into the address of a local variable. In such a case, it's quite
2042 /// reasonable to just ignore the returned alignment when it isn't from an
2043 /// explicit source.
2044 Address emitPointerWithAlignment(const clang::Expr *expr,
2045 LValueBaseInfo *baseInfo = nullptr);
2046
2047 /// Emits a reference binding to the passed in expression.
2048 RValue emitReferenceBindingToExpr(const Expr *e);
2049
2050 mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);
2051
2052 RValue emitRotate(const CallExpr *e, bool isRotateLeft);
2053
2054 mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);
2055
2056 /// Emit a conversion from the specified type to the specified destination
2057 /// type, both of which are CIR scalar types.
2058 mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
2059 clang::QualType dstType,
2060 clang::SourceLocation loc);
2061
2062 void emitScalarInit(const clang::Expr *init, mlir::Location loc,
2063 LValue lvalue, bool capturedByInit = false);
2064
2065 mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx,
2066 const Expr *argExpr);
2067
2068 void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage);
2069
2070 /// Emit a guarded initializer for a static local variable.
2071 void emitCXXGuardedInit(const VarDecl &varDecl, cir::GlobalOp globalOp,
2072 bool performInit);
2073
2074 void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest,
2075 bool isInit);
2076
2077 void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
2078 clang::QualType ty, LValueBaseInfo baseInfo,
2079 bool isInit = false, bool isNontemporal = false);
2080 void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
2081
2082 void emitStoreThroughExtVectorComponentLValue(RValue src, LValue dst);
2083
2084 /// Store the specified rvalue into the specified
 2085 /// lvalue, where both are guaranteed to have the same type, and that
2086 /// type is 'Ty'.
2087 void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);
2088
2089 mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);
2090
2091 LValue emitStringLiteralLValue(const StringLiteral *e,
2092 llvm::StringRef name = ".str");
2093
2094 mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
2095 mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
2096 bool buildingTopLevelCase);
2097 mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
2098
2099 std::optional<mlir::Value>
2100 emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e,
2101 ReturnValueSlot &returnValue);
2102
2103 /// Given a value and its clang type, returns the value casted to its memory
2104 /// representation.
2105 /// Note: CIR defers most of the special casting to the final lowering passes
2106 /// to conserve the high level information.
2107 mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);
2108
2109 /// EmitFromMemory - Change a scalar value from its memory
2110 /// representation to its value representation.
2111 mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty);
2112
2113 /// Emit a trap instruction, which is used to abort the program in an abnormal
2114 /// way, usually for debugging purposes.
2115 /// \p createNewBlock indicates whether to create a new block for the IR
2116 /// builder. Since the `cir.trap` operation is a terminator, operations that
2117 /// follow a trap cannot be emitted after `cir.trap` in the same block. To
2118 /// ensure these operations get emitted successfully, you need to create a new
2119 /// dummy block and set the insertion point there before continuing from the
2120 /// trap operation.
2121 void emitTrap(mlir::Location loc, bool createNewBlock);
2122
2123 LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
2124
2125 mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
2126
2127 /// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
2128 /// checking is enabled. Otherwise, just emit an unreachable instruction.
2129 /// \p createNewBlock indicates whether to create a new block for the IR
2130 /// builder. Since the `cir.unreachable` operation is a terminator, operations
2131 /// that follow an unreachable point cannot be emitted after `cir.unreachable`
2132 /// in the same block. To ensure these operations get emitted successfully,
2133 /// you need to create a dummy block and set the insertion point there before
2134 /// continuing from the unreachable point.
2135 void emitUnreachable(clang::SourceLocation loc, bool createNewBlock);
2136
2137 /// This method handles emission of any variable declaration
2138 /// inside a function, including static vars etc.
2139 void emitVarDecl(const clang::VarDecl &d);
2140
2141 void emitVariablyModifiedType(QualType ty);
2142
2143 mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
2144
2145 std::optional<mlir::Value> emitRISCVBuiltinExpr(unsigned builtinID,
2146 const CallExpr *expr);
2147
2148 std::optional<mlir::Value> emitX86BuiltinExpr(unsigned builtinID,
2149 const CallExpr *expr);
2150
2151 /// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
 2152 /// nonnull, if \p lhs is marked _Nonnull.
2153 void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
2154 clang::SourceLocation loc);
2155
2156 /// An object to manage conditionally-evaluated expressions.
2158 CIRGenFunction &cgf;
2159 mlir::OpBuilder::InsertPoint insertPt;
2160
2161 public:
2163 : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {}
2164 ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
2165 : cgf(cgf), insertPt(ip) {}
2166
2168 assert(cgf.outermostConditional != this);
2169 if (!cgf.outermostConditional)
2170 cgf.outermostConditional = this;
2171 }
2172
2174 assert(cgf.outermostConditional != nullptr);
2175 if (cgf.outermostConditional == this)
2176 cgf.outermostConditional = nullptr;
2177 }
2178
2179 /// Returns the insertion point which will be executed prior to each
2180 /// evaluation of the conditional code. In LLVM OG, this method
2181 /// is called getStartingBlock.
2182 mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; }
2183 };
2184
2186 std::optional<LValue> lhs{}, rhs{};
2187 mlir::Value result{};
2188 };
2189
 2190 // Return true if we're currently emitting one branch or the other of a
 2191 // conditional expression, i.e. there is an active ConditionalEvaluation
 2192 bool isInConditionalBranch() const { return outermostConditional != nullptr; }
2193
 /// Emit a store of \p value into \p addr at the saved insertion point of
 /// the outermost active conditional, so the store executes unconditionally
 /// before any branch of the conditional is entered. Requires that we are
 /// currently inside a conditional branch (asserted below).
 2194 void setBeforeOutermostConditional(mlir::Value value, Address addr) {
 2195 assert(isInConditionalBranch());
 2196 {
 // Temporarily rewind the builder to the pre-conditional insertion point;
 // the guard restores the current insertion point when this scope exits.
 2197 mlir::OpBuilder::InsertionGuard guard(builder);
 2198 builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
 2199 builder.createStore(
 2200 value.getLoc(), value, addr, /*isVolatile=*/false,
 // The store's alignment is passed as an i64 attribute taken from the
 // address's known alignment.
 2201 mlir::IntegerAttr::get(
 2202 mlir::IntegerType::get(value.getContext(), 64),
 2203 (uint64_t)addr.getAlignment().getAsAlign().value()));
 2204 }
 2205 }
2206
2207 // Points to the outermost active conditional control. This is used so that
2208 // we know if a temporary should be destroyed conditionally.
2210
2211 /// An RAII object to record that we're evaluating a statement
2212 /// expression.
2214 CIRGenFunction &cgf;
2215
2216 /// We have to save the outermost conditional: cleanups in a
2217 /// statement expression aren't conditional just because the
2218 /// StmtExpr is.
2219 ConditionalEvaluation *savedOutermostConditional;
2220
2221 public:
2223 : cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
2224 cgf.outermostConditional = nullptr;
2225 }
2226
2228 cgf.outermostConditional = savedOutermostConditional;
2229 }
2230 };
2231
2232 template <typename FuncTy>
2233 ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
2234 const FuncTy &branchGenFunc);
2235
2236 mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
2237 const clang::Stmt *thenS,
2238 const clang::Stmt *elseS);
2239
2240 /// Build a "reference" to a va_list; this is either the address or the value
2241 /// of the expression, depending on how va_list is defined.
2242 Address emitVAListRef(const Expr *e);
2243
2244 /// Emits the start of a CIR variable-argument operation (`cir.va_start`)
2245 ///
2246 /// \param vaList A reference to the \c va_list as emitted by either
2247 /// \c emitVAListRef or \c emitMSVAListRef.
2248 void emitVAStart(mlir::Value vaList);
2249
 2250 /// Emits the end of a CIR variable-argument operation (`cir.va_end`)
2251 ///
2252 /// \param vaList A reference to the \c va_list as emitted by either
2253 /// \c emitVAListRef or \c emitMSVAListRef.
2254 void emitVAEnd(mlir::Value vaList);
2255
2256 /// Generate code to get an argument from the passed in pointer
2257 /// and update it accordingly.
2258 ///
2259 /// \param ve The \c VAArgExpr for which to generate code.
2260 ///
 2261 /// NOTE(review): the declaration takes no \c vaListAddr parameter; the
 2262 /// \c va_list is presumably obtained via \c emitVAListRef or \c emitMSVAListRef.
2263 ///
2264 /// \returns SSA value with the argument.
2265 mlir::Value emitVAArg(VAArgExpr *ve);
2266
2267 /// ----------------------
2268 /// CIR build helpers
2269 /// -----------------
2270public:
2271 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2272 const Twine &name = "tmp",
2273 mlir::Value arraySize = nullptr,
2274 bool insertIntoFnEntryBlock = false);
2275 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2276 const Twine &name = "tmp",
2277 mlir::OpBuilder::InsertPoint ip = {},
2278 mlir::Value arraySize = nullptr);
2279 Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
2280 const Twine &name = "tmp",
2281 mlir::Value arraySize = nullptr,
2282 Address *alloca = nullptr,
2283 mlir::OpBuilder::InsertPoint ip = {});
2284 Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
2285 mlir::Location loc,
2286 const Twine &name = "tmp",
2287 mlir::Value arraySize = nullptr,
2288 mlir::OpBuilder::InsertPoint ip = {});
2289 Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc,
2290 const Twine &name);
2291
2292 /// Create a temporary memory object of the given type, with
 2293 /// appropriate alignment and cast it to the default address space. Returns
2294 /// the original alloca instruction by \p Alloca if it is not nullptr.
2295 Address createMemTemp(QualType t, mlir::Location loc,
2296 const Twine &name = "tmp", Address *alloca = nullptr,
2297 mlir::OpBuilder::InsertPoint ip = {});
2298 Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
2299 const Twine &name = "tmp", Address *alloca = nullptr,
2300 mlir::OpBuilder::InsertPoint ip = {});
2301
 /// Emit an address-space cast of \p v to type \p destTy via the builder.
 /// Casting a value produced directly by a cir.global is not implemented
 /// yet: that case reports NYI and then falls through to the generic cast.
 2302 mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const {
 2303 if (cir::GlobalOp globalOp = v.getDefiningOp<cir::GlobalOp>())
 2304 cgm.errorNYI("Global op addrspace cast");
 2305 return builder.createAddrSpaceCast(v, destTy);
 2306 }
2307
2308 //===--------------------------------------------------------------------===//
2309 // OpenMP Emission
2310 //===--------------------------------------------------------------------===//
2311public:
2312 mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s);
2313 mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s);
2314 mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s);
2315 mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s);
2316 mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s);
2317 mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s);
2318 mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s);
2319 mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s);
2320 mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s);
2321 mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s);
2322 mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s);
2323 mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s);
2324 mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s);
2325 mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s);
2326 mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s);
2327 mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s);
2328 mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s);
2329 mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s);
2330 mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s);
2331 mlir::LogicalResult
2332 emitOMPParallelForDirective(const OMPParallelForDirective &s);
2333 mlir::LogicalResult
2334 emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s);
2335 mlir::LogicalResult
2336 emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s);
2337 mlir::LogicalResult
2338 emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s);
2339 mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s);
2340 mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s);
2341 mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s);
2342 mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s);
2343 mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s);
2344 mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s);
2345 mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s);
2346 mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s);
2347 mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s);
2348 mlir::LogicalResult
2350 mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s);
2351 mlir::LogicalResult
2353 mlir::LogicalResult
2355 mlir::LogicalResult
2357 mlir::LogicalResult
2359 mlir::LogicalResult
2361 mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s);
2362 mlir::LogicalResult
2364 mlir::LogicalResult
2366 mlir::LogicalResult
2368 mlir::LogicalResult
2370 mlir::LogicalResult
2372 mlir::LogicalResult
2374 mlir::LogicalResult
2375 emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s);
2376 mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(
2380 mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(
2384 mlir::LogicalResult
2386 mlir::LogicalResult emitOMPDistributeParallelForDirective(
2390 mlir::LogicalResult
2394 mlir::LogicalResult emitOMPTargetParallelForSimdDirective(
2396 mlir::LogicalResult
2398 mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(
2400 mlir::LogicalResult
2402 mlir::LogicalResult
2404 mlir::LogicalResult
2410 mlir::LogicalResult
2412 mlir::LogicalResult
2414 mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(
2422 mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s);
2423 mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s);
2424 mlir::LogicalResult
2426 mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s);
2427 mlir::LogicalResult emitOMPSplitDirective(const OMPSplitDirective &s);
2428 mlir::LogicalResult
2430 mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s);
2431 mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s);
2432 mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s);
2433
2437 void emitOMPAllocateDecl(const OMPAllocateDecl &d);
2440 void emitOMPRequiresDecl(const OMPRequiresDecl &d);
2441
2442private:
2443 template <typename Op>
2444 void emitOpenMPClauses(Op &op, ArrayRef<const OMPClause *> clauses);
2445
2446 //===--------------------------------------------------------------------===//
2447 // OpenACC Emission
2448 //===--------------------------------------------------------------------===//
2449private:
2450 template <typename Op>
2451 Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind,
2453 // Function to do the basic implementation of an operation with an Associated
2454 // Statement. Models AssociatedStmtConstruct.
2455 template <typename Op, typename TermOp>
2456 mlir::LogicalResult
2457 emitOpenACCOpAssociatedStmt(mlir::Location start, mlir::Location end,
2458 OpenACCDirectiveKind dirKind,
2460 const Stmt *associatedStmt);
2461
2462 template <typename Op, typename TermOp>
2463 mlir::LogicalResult emitOpenACCOpCombinedConstruct(
2464 mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
2465 llvm::ArrayRef<const OpenACCClause *> clauses, const Stmt *loopStmt);
2466
2467 template <typename Op>
2468 void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind,
2470 // The second template argument doesn't need to be a template, since it should
2471 // always be an mlir::acc::LoopOp, but as this is a template anyway, we make
2472 // it a template argument as this way we can avoid including the OpenACC MLIR
2473 // headers here. We will count on linker failures/explicit instantiation to
2474 // ensure we don't mess this up, but it is only called from 1 place, and
2475 // instantiated 3x.
2476 template <typename ComputeOp, typename LoopOp>
2477 void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp,
2478 OpenACCDirectiveKind dirKind,
2480
2481 // The OpenACC LoopOp requires that we have auto, seq, or independent on all
2482 // LoopOp operations for the 'none' device type case. This function checks if
2483 // the LoopOp has one, else it updates it to have one.
2484 void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan,
2486
2487 // The OpenACC 'cache' construct actually applies to the 'loop' if present. So
2488 // keep track of the 'loop' so that we can add the cache vars to it correctly.
2489 mlir::acc::LoopOp *activeLoopOp = nullptr;
2490
 /// RAII helper that installs \p newOp as the function's active OpenACC
 /// 'loop' operation for the duration of a scope, restoring the previously
 /// active one on destruction.
 2491 struct ActiveOpenACCLoopRAII {
 2492 CIRGenFunction &cgf;
 // The loop op that was active before this scope was entered.
 2493 mlir::acc::LoopOp *oldLoopOp;
 2494
 2495 ActiveOpenACCLoopRAII(CIRGenFunction &cgf, mlir::acc::LoopOp *newOp)
 2496 : cgf(cgf), oldLoopOp(cgf.activeLoopOp) {
 2497 cgf.activeLoopOp = newOp;
 2498 }
 // Restore the previously active loop op on scope exit.
 2499 ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; }
 2500 };
2501
2502 // Keep track of the last place we inserted a 'recipe' so that we can insert
2503 // the next one in lexical order.
2504 mlir::OpBuilder::InsertPoint lastRecipeLocation;
2505
2506public:
2507 // Helper type used to store the list of important information for a 'data'
2508 // clause variable, or a 'cache' variable reference.
2510 mlir::Location beginLoc;
2511 mlir::Value varValue;
2512 std::string name;
2513 // The type of the original variable reference: that is, after 'bounds' have
2514 // removed pointers/array types/etc. So in the case of int arr[5], and a
2515 // private(arr[1]), 'origType' is 'int', but 'baseType' is 'int[5]'.
2519 // The list of types that we found when going through the bounds, which we
2520 // can use to properly set the alloca section.
2522 };
2523
 2524 // Gets the collection of info required to lower an OpenACC clause or cache
2525 // construct variable reference.
2527 // Helper function to emit the integer expressions as required by an OpenACC
2528 // clause/construct.
2529 mlir::Value emitOpenACCIntExpr(const Expr *intExpr);
2530 // Helper function to emit an integer constant as an mlir int type, used for
2531 // constants in OpenACC constructs/clauses.
2532 mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width,
2533 int64_t value);
2534
2535 mlir::LogicalResult
2537 mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
2538 mlir::LogicalResult
2540 mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
2541 mlir::LogicalResult
2543 mlir::LogicalResult
2545 mlir::LogicalResult
2547 mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
2548 mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
2549 mlir::LogicalResult
2551 mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
2552 mlir::LogicalResult
2554 mlir::LogicalResult
2556 mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);
2557
2560
2561 /// Create a temporary memory object for the given aggregate type.
2562 AggValueSlot createAggTemp(QualType ty, mlir::Location loc,
2563 const Twine &name = "tmp",
2564 Address *alloca = nullptr) {
2566 return AggValueSlot::forAddr(
2567 createMemTemp(ty, loc, name, alloca), ty.getQualifiers(),
2570 }
2571
2572private:
2573 QualType getVarArgType(const Expr *arg);
2574
2575 class InlinedInheritingConstructorScope {
2576 public:
2577 InlinedInheritingConstructorScope(CIRGenFunction &cgf, GlobalDecl gd)
2578 : cgf(cgf), oldCurGD(cgf.curGD), oldCurFuncDecl(cgf.curFuncDecl),
2579 oldCurCodeDecl(cgf.curCodeDecl),
2580 oldCxxabiThisDecl(cgf.cxxabiThisDecl),
2581 oldCxxThisValue(cgf.cxxThisValue),
2582 oldCxxabiThisAlignment(cgf.cxxabiThisAlignment),
2583 oldCxxThisAlignment(cgf.cxxThisAlignment),
2584 oldReturnValue(cgf.returnValue), oldFnRetTy(cgf.fnRetTy),
2585 oldCxxInheritedCtorInitExprArgs(
2586 std::move(cgf.cxxInheritedCtorInitExprArgs)) {
2587 cgf.curGD = gd;
2589 cgf.curCodeDecl = cgf.curFuncDecl;
2590 cgf.cxxabiThisDecl = nullptr;
2591 cgf.cxxabiThisValue = nullptr;
2592 cgf.cxxThisValue = nullptr;
2596 cgf.fnRetTy = QualType();
2597 cgf.cxxInheritedCtorInitExprArgs.clear();
2598 // FIXME: at one point when we want to call one of these, we'll need
2599 // CXXInheritedCtorInitExprArgs here too.
2600 }
2601 ~InlinedInheritingConstructorScope() {
2602 cgf.curGD = oldCurGD;
2603 cgf.curFuncDecl = oldCurFuncDecl;
2604 cgf.curCodeDecl = oldCurCodeDecl;
2605 cgf.cxxabiThisDecl = oldCxxabiThisDecl;
2606 cgf.cxxabiThisValue = oldCxxabiThisValue;
2607 cgf.cxxThisValue = oldCxxThisValue;
2608 cgf.cxxThisAlignment = oldCxxThisAlignment;
2609 cgf.cxxabiThisAlignment = oldCxxabiThisAlignment;
2610 cgf.returnValue = oldReturnValue;
2611 cgf.fnRetTy = oldFnRetTy;
2613 std::move(oldCxxInheritedCtorInitExprArgs);
2614 }
2615
2616 private:
2617 CIRGenFunction &cgf;
2618 GlobalDecl oldCurGD;
2619 const Decl *oldCurFuncDecl;
2620 const Decl *oldCurCodeDecl;
2621 ImplicitParamDecl *oldCxxabiThisDecl;
2622 mlir::Value oldCxxabiThisValue;
2623 mlir::Value oldCxxThisValue;
2624 clang::CharUnits oldCxxabiThisAlignment;
2625 clang::CharUnits oldCxxThisAlignment;
2626 Address oldReturnValue;
2627 QualType oldFnRetTy;
2628 CallArgList oldCxxInheritedCtorInitExprArgs;
2629 };
2630};
2631
2632} // namespace clang::CIRGen
2633
2634#endif
Defines the clang::ASTContext interface.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, Address ptr, Address val1, Address val2, Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size, cir::MemOrder order, cir::SyncScopeKind scope)
Defines the clang::Expr interface and subclasses for C++ expressions.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
Defines an enumeration for C++ overloaded operators.
Enumerates target-specific builtins in their own namespaces within namespace clang.
C Language Family Type Representation.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
This represents 'pragma omp cancel' directive.
This represents 'pragma omp cancellation point' directive.
This represents 'pragma omp dispatch' directive.
This represents 'pragma omp distribute' directive.
This represents 'pragma omp distribute parallel for' composite directive.
This represents 'pragma omp distribute parallel for simd' composite directive.
This represents 'pragma omp distribute simd' composite directive.
This represents 'pragma omp error' directive.
Represents the 'pragma omp fuse' loop transformation directive.
This represents 'pragma omp loop' directive.
Represents the 'pragma omp interchange' loop transformation directive.
This represents 'pragma omp interop' directive.
This represents 'pragma omp masked' directive.
This represents 'pragma omp masked taskloop' directive.
This represents 'pragma omp masked taskloop simd' directive.
This represents 'pragma omp master taskloop' directive.
This represents 'pragma omp master taskloop simd' directive.
This represents 'pragma omp metadirective' directive.
This represents 'pragma omp parallel loop' directive.
This represents 'pragma omp parallel masked taskloop' directive.
This represents 'pragma omp parallel masked taskloop simd' directive.
This represents 'pragma omp parallel master taskloop' directive.
This represents 'pragma omp parallel master taskloop simd' directive.
Represents the 'pragma omp reverse' loop transformation directive.
This represents 'pragma omp scan' directive.
Represents the 'pragma omp split' loop transformation directive.
This represents the 'pragma omp stripe' loop transformation directive.
This represents 'pragma omp target data' directive.
This represents 'pragma omp target' directive.
This represents 'pragma omp target enter data' directive.
This represents 'pragma omp target exit data' directive.
This represents 'pragma omp target parallel' directive.
This represents 'pragma omp target parallel for' directive.
This represents 'pragma omp target parallel for simd' directive.
This represents 'pragma omp target parallel loop' directive.
This represents 'pragma omp target simd' directive.
This represents 'pragma omp target teams' directive.
This represents 'pragma omp target teams distribute' combined directive.
This represents 'pragma omp target teams distribute parallel for' combined directive.
This represents 'pragma omp target teams distribute parallel for simd' combined directive.
This represents 'pragma omp target teams distribute simd' combined directive.
This represents 'pragma omp target teams loop' directive.
This represents 'pragma omp target update' directive.
This represents 'pragma omp taskloop' directive.
This represents 'pragma omp taskloop simd' directive.
This represents 'pragma omp teams' directive.
This represents 'pragma omp teams distribute' directive.
This represents 'pragma omp teams distribute parallel for' composite directive.
This represents 'pragma omp teams distribute parallel for simd' composite directive.
This represents 'pragma omp teams distribute simd' combined directive.
This represents 'pragma omp teams loop' directive.
This represents the 'pragma omp tile' loop transformation directive.
This represents the 'pragma omp unroll' loop transformation directive.
This class represents a 'loop' construct. The 'loop' construct applies to a 'for' loop (or range-for ...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:227
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3777
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3283
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition Expr.h:6929
Represents an attribute applied to a statement.
Definition Stmt.h:2209
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition Expr.h:4456
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition Expr.h:4494
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition Expr.h:4491
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
BreakStmt - This represents a break.
Definition Stmt.h:3141
mlir::Value getPointer() const
Definition Address.h:96
static Address invalid()
Definition Address.h:74
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Value getBasePointer() const
Definition Address.h:101
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
An abstract representation of regular/ObjC call/message targets.
AbstractCallee(const clang::FunctionDecl *fd)
const clang::ParmVarDecl * getParamDecl(unsigned I) const
ArrayInitLoopExprScope(CIRGenFunction &cgf, bool setIdx, mlir::Value index)
CIRGenFPOptionsRAII(CIRGenFunction &cgf, FPOptions FPFeatures)
CXXDefaultInitExprScope(CIRGenFunction &cgf, const CXXDefaultInitExpr *e)
An object to manage conditionally-evaluated expressions.
ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
mlir::OpBuilder::InsertPoint getInsertPoint() const
Returns the insertion point which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forReference(mlir::TypedAttr c)
static ConstantEmission forValue(mlir::TypedAttr c)
LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const
DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr)
FullExprCleanupScope(CIRGenFunction &cgf, const Expr *subExpr)
void exit(ArrayRef< mlir::Value * > valuesToReload={})
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, LValue lvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
RunCleanupsScope(CIRGenFunction &cgf)
Enter a new cleanup scope.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool hasPendingCleanups() const
Whether there are any pending cleanups that have been pushed since this scope was entered.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void restore()
Can be used to restore the state early, before the dtor is run.
SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value)
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
static bool hasScalarEvaluationKind(clang::QualType type)
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
void emitOpenACCRoutine(const OpenACCRoutineDecl &d)
void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d, cir::GlobalOp gv, cir::GetGlobalOp gvAddr)
Add the initializer for 'd' to the global variable that has already been created for it.
mlir::LogicalResult emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &s)
void generateThunk(cir::FuncOp fn, const CIRGenFunctionInfo &fnInfo, GlobalDecl gd, const ThunkInfo &thunk, bool isUnprototyped)
Generate code for a thunk function.
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard SourceLocExprScopeGuard
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d, mlir::OpBuilder::InsertPoint ip={})
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, clang::QualType::DestructionKind dtorKind)
Enter a destroy cleanup for the given local variable.
ImplicitParamDecl * cxxabiThisDecl
CXXThisDecl - When generating code for a C++ member function, this will hold the implicit 'this' decl...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
bool curFuncIsThunk
In C++, whether we are code generating a thunk.
mlir::LogicalResult emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
llvm::SmallVector< PendingCleanupEntry > lifetimeExtendedCleanupStack
mlir::LogicalResult emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &s)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
llvm::ScopedHashTable< const clang::Decl *, mlir::Value > SymTableTy
The symbol table maps a variable name to a value in the current scope.
void initFullExprCleanup()
Set up the last cleanup that was pushed as a conditional full-expression cleanup.
void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc)
Definition CIRGenCXX.cpp:33
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s)
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator, CallArgList &callArgs)
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
void emitOMPRequiresDecl(const OMPRequiresDecl &d)
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::LogicalResult emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s)
Address cxxDefaultInitExprThis
The value of 'this' to use when evaluating CXXDefaultInitExprs within this expression.
void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &s)
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s)
void setBeforeOutermostConditional(mlir::Value value, Address addr)
mlir::LogicalResult emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s)
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::Value loadCXXThis()
Load the value for 'this'.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
llvm::DenseMap< const clang::Decl *, Address > DeclMapTy
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void addCatchHandlerAttr(const CXXCatchStmt *catchStmt, SmallVector< mlir::Attribute > &handlerAttrs)
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
const clang::Decl * curFuncDecl
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
LValue emitLValueForLambdaField(const FieldDecl *field)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOrTryCall=nullptr)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
bool isTrivialInitializer(const Expr *init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
void emitOpenACCDeclare(const OpenACCDeclareDecl &d)
void emitInlinedInheritingCXXConstructorCall(SourceLocation loc, const CXXConstructorDecl *d, CXXCtorType ctorType, bool forVirtualBase, bool delegating, CallArgList &args)
Address getAddrOfLocalVar(const clang::VarDecl *vd)
Return the address of a local variable.
void emitAnyExprToExn(const Expr *e, Address addr)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
void emitMustTailThunk(GlobalDecl gd, mlir::Value adjustedThisPtr, cir::FuncOp callee)
Emit a musttail call for a thunk with a potentially different ABI.
void pushIrregularPartialArrayCleanup(mlir::Value arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Push an EH cleanup to destroy already-constructed elements of the given array.
Address getAsNaturalAddressOf(Address addr, QualType pointeeTy)
void pushCleanupAndDeferDeactivation(CleanupKind kind, As... a)
Push a cleanup and record it for deferred deactivation.
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s)
void emitBeginCatch(const CXXCatchStmt *catchStmt, mlir::Value ehToken)
Begins a catch statement by initializing the catch variable and calling __cxa_begin_catch.
mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase, bool delegating)
Return the VTT parameter that should be passed to a base constructor/destructor with virtual bases.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s)
void initializeVTablePointers(mlir::Location loc, const clang::CXXRecordDecl *rd)
mlir::Type convertType(const TypeDecl *t)
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas.
void emitStoreThroughExtVectorComponentLValue(RValue src, LValue dst)
void initializeVTablePointer(mlir::Location loc, const VPtr &vptr)
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d)
void emitAggregateStore(mlir::Value value, Address dest)
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
ConditionalEvaluation * outermostConditional
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &s)
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl, CXXCtorInitializer *baseInit)
RValue emitAtomicExpr(AtomicExpr *e)
void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d, LValue lvalue, bool capturedByInit=false)
Emit an expression as an initializer for an object (variable, field, etc.) at the given location.
void emitCXXGuardedInit(const VarDecl &varDecl, cir::GlobalOp globalOp, bool performInit)
Emit a guarded initializer for a static local variable.
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s)
cir::CallOp emitCoroFreeBuiltin(const CallExpr *e)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s)
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
mlir::LogicalResult emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s)
llvm::SmallVector< DeferredDeactivateCleanup > deferredDeactivationCleanupStack
VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass)
const TargetCIRGenInfo & getTargetHooks() const
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s)
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
RValue emitCoyieldExpr(const CoyieldExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s)
std::optional< mlir::Value > emitRISCVBuiltinExpr(unsigned builtinID, const CallExpr *expr)
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e)
mlir::LogicalResult emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s)
CleanupKind getCleanupKind(QualType::DestructionKind kind)
clang::CharUnits cxxabiThisAlignment
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s)
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
std::pair< mlir::Value, mlir::Type > emitAsmInputLValue(const TargetInfo::ConstraintInfo &info, LValue inputValue, QualType inputType, std::string &constraintString, SourceLocation loc)
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::LogicalResult emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s)
void emitOMPAllocateDecl(const OMPAllocateDecl &d)
mlir::LogicalResult emitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &s)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
CallArgList cxxInheritedCtorInitExprArgs
The values of function arguments to use when evaluating CXXInheritedCtorInitExprs within this context...
mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType)
ImplicitParamDecl * cxxStructorImplicitParamDecl
When generating code for a constructor or destructor, this will hold the implicit argument (e....
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d)
mlir::LogicalResult emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType)
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
void startThunk(cir::FuncOp fn, GlobalDecl gd, const CIRGenFunctionInfo &fnInfo, bool isUnprototyped)
Start generating a thunk function.
RValue emitAtomicLoad(LValue lvalue, SourceLocation loc, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
llvm::SmallVector< PendingCleanupEntry > deferredConditionalCleanupStack
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s)
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s)
mlir::LogicalResult emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s)
mlir::LogicalResult emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
static int64_t getZExtIntValueFromConstOp(mlir::Value val)
Get zero-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s)
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &s)
mlir::LogicalResult emitOMPInterchangeDirective(const OMPInterchangeDirective &s)
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s)
mlir::Type convertTypeForMem(QualType t)
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s, cxxTryBodyEmitter &bodyCallback)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s)
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s)
LValue emitAggExprToLValue(const Expr *e)
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
clang::CurrentSourceLocExprScope curSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
mlir::Value loadCXXVTT()
Load the VTT parameter to pass to base constructors/destructors that have virtual bases.
void emitVarDecl(const clang::VarDecl &d)
This method handles emission of any variable declaration inside a function, including static vars etc...
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value emitSVEPredicateCast(mlir::Value pred, unsigned minNumElts, mlir::Location loc)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr, SmallVectorImpl< mlir::Value > &ops, clang::SVETypeFlags typeFlags)
Address returnValue
The temporary alloca to hold the return value.
LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo)
void emitCallAndReturnForThunk(cir::FuncOp callee, const ThunkInfo *thunk, bool isUnprototyped)
Emit the call and return for a thunk function.
static int64_t getSExtIntValueFromConstOp(mlir::Value val)
Get integer from a mlir::Value that is an int constant or a constant op.
mlir::Value getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
mlir::LogicalResult emitOMPTargetDataDirective(const OMPTargetDataDirective &s)
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &s)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr, const clang::CXXRecordDecl *vtableClass)
Return the Value of the vtable pointer member pointed to by thisAddr.
void emitArrayDestroy(mlir::Value begin, mlir::Value numElements, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Destroys all the elements of the given array, beginning from last to first.
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::LogicalResult emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s)
RValue emitAnyExprToTemp(const clang::Expr *e)
Similarly to emitAnyExpr(), however, the result will always be accessible even if no aggregate locati...
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
void initFullExprCleanupWithFlag(Address activeFlag)
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s)
mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS)
void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
llvm::SmallPtrSet< const clang::CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
Address createCleanupActiveFlag()
Create an active flag variable for use with conditional cleanups.
bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
bool hasVolatileMember(QualType t)
returns true if aggregate type has a volatile member.
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s)
void emitCallArg(CallArgList &args, const clang::Expr *e, clang::QualType argType)
clang::FieldDecl * lambdaThisCaptureField
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s)
void emitConstructorBody(FunctionArgList &args)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &s)
LValue emitCallExprLValue(const clang::CallExpr *e)
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s)
bool haveInsertPoint() const
True if an insertion point is defined.
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::LogicalResult emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s)
void emitAutoVarInit(const AutoVarEmission &emission)
Emit the initializer for an allocated variable.
void emitInitializerForField(clang::FieldDecl *field, LValue lhs, clang::Expr *init)
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
std::optional< mlir::Value > emitAMDGPUBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an AMDGPU builtin function.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::LogicalResult emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void maybeEmitDeferredVarDeclInit(const VarDecl *vd)
llvm::SmallDenseMap< const ParmVarDecl *, const ImplicitParamDecl * > sizeArguments
If a ParmVarDecl had the pass_object_size attribute, this will contain a mapping from said ParmVarDec...
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_start)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
std::optional< mlir::Value > emitAArch64SMEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
void pushStackRestore(CleanupKind kind, Address spMem)
LValue emitPseudoObjectLValue(const PseudoObjectExpr *E)
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s)
void emitAutoVarDecl(const clang::VarDecl &d)
Emit code and set up symbol table for a variable declaration with auto, register, or no storage class...
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s)
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s)
mlir::LogicalResult emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s)
CIRGenBuilderTy & getBuilder()
void emitVAStart(mlir::Value vaList)
Emits the start of a CIR variable-argument operation (cir.va_start)
bool didCallStackSave
Whether a cir.stacksave operation has been added.
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
mlir::Value emitOpenACCIntExpr(const Expr *intExpr)
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitDestroy(Address addr, QualType type, Destroyer *destroyer)
Immediately perform the destruction of the given object.
void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc, AbstractCallee ac, unsigned paramNum)
Create a check for a function parameter that may potentially be declared as non-null.
void pushPendingCleanupToEHStack(const PendingCleanupEntry &entry)
Promote a single pending cleanup entry onto the EH scope stack.
mlir::LogicalResult emitOMPSplitDirective(const OMPSplitDirective &s)
const CIRGenModule & getCIRGenModule() const
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
llvm::DenseMap< const VarDecl *, mlir::Value > nrvoFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
Destroyer * getDestroyer(clang::QualType::DestructionKind kind)
mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee, llvm::ArrayRef< mlir::Value > args={}, mlir::NamedAttrList attrs={})
void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty)
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
LValue emitInitListLValue(const InitListExpr *e)
mlir::Value arrayInitIndex
The current array initialization index when evaluating an ArrayInitIndexExpr within an ArrayInitLoopE...
void emitAtomicInit(Expr *init, LValue dest)
void popCleanupBlock(bool forDeactivation=false)
Pop a cleanup block from the stack.
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
mlir::LogicalResult emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width, int64_t value)
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
void pushEHDestroyIfNeeded(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushEHDestroyIfNeeded - Push the standard destructor for the given type as an EH-only cleanup.
void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d)
std::optional< mlir::Value > emitNVPTXBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an NVPTX builtin function.
void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::LogicalResult emitOMPParallelForDirective(const OMPParallelForDirective &s)
RValue emitCoawaitExpr(const CoawaitExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
void emitCXXThrowExpr(const CXXThrowExpr *e)
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
mlir::LogicalResult emitOMPDistributeDirective(const OMPDistributeDirective &s)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &s)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &s)
llvm::SmallVector< VPtr, 4 > VPtrsVector
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
bool sawAsmBlock
Whether or not a Microsoft-style asm block has been processed within this function.
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s)
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
std::pair< mlir::Value, mlir::Type > emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr, std::string &constraintString)
EHScopeStack::stable_iterator currentCleanupStackDepth
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
void pushFullExprCleanup(CleanupKind kind, As... a)
Push a cleanup to be run at the end of the current full-expression.
void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param, clang::SourceLocation loc)
We are performing a delegate call; that is, the current function is delegating to another one.
mlir::LogicalResult emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
const CIRGenFunctionInfo * curFnInfo
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
void terminateStructuredRegionBody(mlir::Region &r, mlir::Location loc)
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
void emitInheritedCXXConstructorCall(const CXXConstructorDecl *d, bool forVirtualBase, Address thisAddr, bool inheritedFromVBase, const CXXInheritedCtorInitExpr *e)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
void pushCleanupAfterFullExpr(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer)
Queue a cleanup to be pushed after finishing the current full-expression.
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if LHS is marked _Nonnull...
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
std::optional< mlir::Value > emitAArch64SVEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr)
void emitOMPCapturedExpr(const OMPCapturedExprDecl &d)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &s)
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s)
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates an alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &s)
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s)
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor, const FunctionArgList &args)
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, mlir::OpBuilder::InsertPoint ip, mlir::Value arraySize=nullptr)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s)
void finishThunk()
Finish generating a thunk function.
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
void emitAutoVarCleanups(const AutoVarEmission &emission)
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:50
A saved depth on the scope stack.
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:193
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a call to a CUDA kernel function.
Definition ExprCXX.h:238
CXXCatchStmt - This represents a C++ catch block.
Definition StmtCXX.h:28
Represents a call to a C++ constructor.
Definition ExprCXX.h:1552
Represents a C++ constructor within a class.
Definition DeclCXX.h:2620
Represents a C++ base or member initializer.
Definition DeclCXX.h:2385
A default argument (C++ [dcl.fct.default]).
Definition ExprCXX.h:1274
A use of a default initializer in a constructor or in aggregate initialization.
Definition ExprCXX.h:1381
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2630
Represents a C++ destructor within a class.
Definition DeclCXX.h:2882
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:485
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition ExprCXX.h:1755
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:183
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2132
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2359
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:85
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2749
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
Represents a C++ temporary.
Definition ExprCXX.h:1463
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1212
CXXTryStmt - A C++ try block, including all handlers.
Definition StmtCXX.h:69
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:852
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
CaseStmt - Represent a case statement.
Definition Stmt.h:1926
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4303
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1746
ContinueStmt - This represents a continue.
Definition Stmt.h:3125
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition StmtCXX.h:473
Represents the body of a coroutine.
Definition StmtCXX.h:320
SourceLocExprScopeGuard(const Expr *DefaultExpr, CurrentSourceLocExprScope &Current)
Represents the current source location and context used to determine the value of the source location...
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1637
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2838
This represents one expression.
Definition Expr.h:112
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6611
Represents a member of a struct/union/class.
Definition Decl.h:3178
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2894
Represents a function declaration or definition.
Definition Decl.h:2018
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5362
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
GotoStmt - This represents a direct goto.
Definition Stmt.h:2975
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3014
Describes an C or C++ initializer list.
Definition Expr.h:5302
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3708
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
This represents 'pragma omp allocate ...' directive.
Definition DeclOpenMP.h:536
Pseudo declaration for capturing expressions.
Definition DeclOpenMP.h:445
This represents 'pragma omp declare mapper ...' directive.
Definition DeclOpenMP.h:349
This represents 'pragma omp declare reduction ...' directive.
Definition DeclOpenMP.h:239
This represents 'pragma omp groupprivate ...' directive.
Definition DeclOpenMP.h:173
This represents 'pragma omp requires...' directive.
Definition DeclOpenMP.h:479
This represents 'pragma omp threadprivate ...' directive.
Definition DeclOpenMP.h:110
ObjCMethodDecl - Represents an instance or class method declaration.
Definition DeclObjC.h:140
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
Represents a parameter to a function.
Definition Decl.h:1808
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6805
A (possibly-)qualified type.
Definition TypeBase.h:937
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8476
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
Flags to identify the types for overloaded SVE builtins.
Encodes a location in the source.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Exposes information about the current target.
Definition TargetInfo.h:227
Represents a declaration of a type.
Definition Decl.h:3531
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
Represents a variable declaration or definition.
Definition Decl.h:924
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4021
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
const internal::VariadicDynCastAllOfMatcher< Decl, VarDecl > varDecl
Matches variable declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, CallExpr > callExpr
Matches call expressions.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicAllOfMatcher< Stmt > stmt
Matches statements.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
@ Address
A pointer to a ValueDecl.
Definition Primitives.h:28
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
OpenACCDirectiveKind
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
OpenACCComputeConstruct(OpenACCDirectiveKind K, SourceLocation Start, SourceLocation DirectiveLoc, SourceLocation End, ArrayRef< const OpenACCClause * > Clauses, Stmt *StructuredBlock)
CXXDtorType
C++ destructor types.
Definition ABI.h:34
U cast(CodeGen::Address addr)
Definition Address.h:327
#define true
Definition stdbool.h:25
static bool aggValueSlot()
static bool peepholeProtection()
static bool opAllocaEscapeByReference()
static bool generateDebugInfo()
AutoVarEmission(const clang::VarDecl &variable)
bool isEscapingByRef
True if the variable is a __block variable that is captured by an escaping block.
Address addr
The address of the alloca for languages with explicit address space (e.g.
bool emittedAsOffload
True if the variable was emitted as an offload recipe, and thus doesn't have the same sort of alloca ...
bool isConstantAggregate
True if the variable is of aggregate type and has a constant initializer.
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
Address getObjectAddress(CIRGenFunction &cgf) const
Returns the address of the object within this declaration.
std::unique_ptr< CGCoroData > data
CXXDefaultArgExprScope(CIRGenFunction &cfg, const CXXDefaultArgExpr *e)
Scope that deactivates all enclosed deferred cleanups on exit.
A cleanup that was pushed to the EH stack but whose deactivation is deferred until the enclosing Clea...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
mlir::Block * getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc)
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
void updateRetLoc(mlir::Block *b, mlir::Location loc)
mlir::Location getRetLoc(mlir::Block *b)
A cleanup entry that will be promoted onto the EH scope stack at a later point.
llvm::PointerUnion< const clang::FunctionProtoType *, const clang::ObjCMethodDecl * > p
PrototypeWrapper(const clang::ObjCMethodDecl *md)
PrototypeWrapper(const clang::FunctionProtoType *ft)
const clang::CXXRecordDecl * vtableClass
const clang::CXXRecordDecl * nearestVBase
VlaSizePair(mlir::Value num, QualType ty)
virtual mlir::LogicalResult operator()(CIRGenFunction &cgf)=0
The this pointer adjustment as well as an optional return adjustment for a thunk.
Definition Thunk.h:157