clang 23.0.0git
CIRGenFunction.h
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
14#define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
15
16#include "CIRGenBuilder.h"
17#include "CIRGenCall.h"
18#include "CIRGenModule.h"
19#include "CIRGenTypeCache.h"
20#include "CIRGenValue.h"
21#include "EHScopeStack.h"
22
23#include "Address.h"
24
27#include "clang/AST/CharUnits.h"
29#include "clang/AST/Decl.h"
30#include "clang/AST/ExprCXX.h"
31#include "clang/AST/Stmt.h"
32#include "clang/AST/Type.h"
38#include "llvm/ADT/ScopedHashTable.h"
39#include "llvm/IR/Instructions.h"
40
41namespace {
42class ScalarExprEmitter;
43} // namespace
44
45namespace mlir {
46namespace acc {
47class LoopOp;
48} // namespace acc
49} // namespace mlir
50
51namespace clang::CIRGen {
52
53struct CGCoroData;
54
56public:
58
59private:
60 friend class ::ScalarExprEmitter;
61 /// The builder is a helper class to create IR inside a function. The
62 /// builder is stateful, in particular it keeps an "insertion point": this
63 /// is where the next operations will be introduced.
64 CIRGenBuilderTy &builder;
65
66public:
67 /// The GlobalDecl for the current function being compiled or the global
68 /// variable currently being initialized.
70
72
73 /// The compiler-generated variable that holds the return value.
74 std::optional<mlir::Value> fnRetAlloca;
75
76 // Holds coroutine data if the current function is a coroutine. We use a
77 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
78 // in this header.
79 struct CGCoroInfo {
80 std::unique_ptr<CGCoroData> data;
81 CGCoroInfo();
83 };
85
86 bool isCoroutine() const { return curCoro.data != nullptr; }
87
88 /// The temporary alloca to hold the return value. This is
89 /// invalid iff the function has no return value.
91
92 /// Tracks function scope overall cleanup handling.
94
95 typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
96
97 /// A cleanup entry that will be promoted onto the EH scope stack at a later
98 /// point. Used by both the lifetime-extended cleanup stack (promoted when
99 /// the enclosing scope exits) and the deferred conditional cleanup stack
100 /// (promoted at the enclosing full-expression level).
101 ///
102 /// Currently only DestroyObject cleanups use this. When other cleanup types
103 /// are needed (e.g., CallLifetimeEnd), this struct can be extended with a
104 /// std::variant of cleanup data types.
112
114
116
117 /// A cleanup that was pushed to the EH stack but whose deactivation is
118 /// deferred until the enclosing CleanupDeactivationScope exits. Used to
119 /// protect partially-constructed aggregates (e.g. lambda captures) so that
120 /// already-initialized sub-objects are destroyed if a later initializer
121 /// throws, while avoiding double-destruction after full construction.
127
128 /// Scope that deactivates all enclosed deferred cleanups on exit.
129 /// Mirrors CodeGenFunction::CleanupDeactivationScope in classic codegen.
133 bool deactivated = false;
134
138
140 assert(!deactivated && "Deactivating already deactivated scope");
141 auto &stack = cgf.deferredDeactivationCleanupStack;
142 for (size_t i = stack.size(); i > oldDeactivateCleanupStackSize; i--) {
143 cgf.deactivateCleanupBlock(stack[i - 1].cleanup,
144 stack[i - 1].dominatingIP);
145 stack[i - 1].dominatingIP->erase();
146 }
147 stack.resize(oldDeactivateCleanupStackSize);
148 deactivated = true;
149 }
150
155 };
156
158
159 /// A mapping from NRVO variables to the flags used to indicate
160 /// when the NRVO has been applied to this variable.
161 llvm::DenseMap<const VarDecl *, mlir::Value> nrvoFlags;
162
163 llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
166
167 /// CXXThisDecl - When generating code for a C++ member function,
168 /// this will hold the implicit 'this' declaration.
170 mlir::Value cxxabiThisValue = nullptr;
171 mlir::Value cxxThisValue = nullptr;
174
175 /// When generating code for a constructor or destructor, this will hold the
176 /// implicit argument (e.g. VTT).
179
180 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within this
181 /// expression.
183
184 /// The values of function arguments to use when evaluating
185 /// CXXInheritedCtorInitExprs within this context.
187
188 /// The current array initialization index when evaluating an
189 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
190 mlir::Value arrayInitIndex = nullptr;
191
192 // Holds the Decl for the current outermost non-closure context
193 const clang::Decl *curFuncDecl = nullptr;
194 /// This is the inner-most code context, which includes blocks.
195 const clang::Decl *curCodeDecl = nullptr;
198
199 /// The current function or global initializer that is generated code for.
200 /// This is usually a cir::FuncOp, but it can also be a cir::GlobalOp for
201 /// global initializers.
202 mlir::Operation *curFn = nullptr;
203
204 /// Save Parameter Decl for coroutine.
206
207 using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
208 /// This keeps track of the CIR allocas or globals for local C
209 /// declarations.
211
212 /// The type of the condition for the emitting switch statement.
214
/// The AST context of the translation unit, obtained from the CIRGenModule.
clang::ASTContext &getContext() const { return cgm.getASTContext(); }
216
/// The stateful IR builder used to create operations for this function
/// (see the `builder` member above).
CIRGenBuilderTy &getBuilder() { return builder; }
218
/// The CIRGenModule this function is being emitted for (const access).
const CIRGenModule &getCIRGenModule() const { return cgm; }
221
223 // We currently assume this isn't called for a global initializer.
224 auto fn = mlir::cast<cir::FuncOp>(curFn);
225 return &fn.getRegion().front();
226 }
227
228 /// Sanitizers enabled for this function.
230
232 public:
236
237 private:
238 void ConstructorHelper(clang::FPOptions FPFeatures);
239 CIRGenFunction &cgf;
240 clang::FPOptions oldFPFeatures;
241 llvm::fp::ExceptionBehavior oldExcept;
242 llvm::RoundingMode oldRounding;
243 };
245
246 /// The symbol table maps a variable name to a value in the current scope.
247 /// Entering a function creates a new scope, and the function arguments are
248 /// added to the mapping. When the processing of a function is terminated,
249 /// the scope is destroyed and the mappings created in this scope are
250 /// dropped.
251 using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
253
254 /// Whether a cir.stacksave operation has been added. Used to avoid
255 /// inserting cir.stacksave for multiple VLAs in the same scope.
256 bool didCallStackSave = false;
257
258 /// Whether or not a Microsoft-style asm block has been processed within
259 /// this function. These can potentially set the return value.
260 bool sawAsmBlock = false;
261
262 /// In C++, whether we are code generating a thunk. This controls whether we
263 /// should emit cleanups.
264 bool curFuncIsThunk = false;
265
266 mlir::Type convertTypeForMem(QualType t);
267
268 mlir::Type convertType(clang::QualType t);
269 mlir::Type convertType(const TypeDecl *t) {
270 return convertType(getContext().getTypeDeclType(t));
271 }
272
273 /// Get integer from a mlir::Value that is an int constant or a constant op.
274 static int64_t getSExtIntValueFromConstOp(mlir::Value val) {
275 auto constOp = val.getDefiningOp<cir::ConstantOp>();
276 assert(constOp && "getSExtIntValueFromConstOp call with non ConstantOp");
277 return constOp.getIntValue().getSExtValue();
278 }
279
/// Get zero-extended integer from a mlir::Value that is an int constant or a
/// constant op.
/// NOTE(review): the zero-extended value is returned through a signed
/// int64_t, so a constant with its top bit set comes back negative —
/// presumably callers only pass values that fit in 63 bits; confirm.
static int64_t getZExtIntValueFromConstOp(mlir::Value val) {
  auto constOp = val.getDefiningOp<cir::ConstantOp>();
  assert(constOp && "getZExtIntValueFromConstOp call with non ConstantOp");
  return constOp.getIntValue().getZExtValue();
}
287
288 /// Return the cir::TypeEvaluationKind of QualType \c type.
290
294
298
300 bool suppressNewContext = false);
302
/// The module's CIRGenTypes instance (type conversion helpers).
CIRGenTypes &getTypes() const { return cgm.getTypes(); }
304
/// The target info of the translation unit, obtained from the CIRGenModule.
const TargetInfo &getTarget() const { return cgm.getTarget(); }
/// The MLIRContext associated with the module being emitted.
mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }
307
309 return cgm.getTargetCIRGenInfo();
310 }
311
312 // ---------------------
313 // Opaque value handling
314 // ---------------------
315
316 /// Keeps track of the current set of opaque value expressions.
317 llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
318 llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
319
320 // This keeps track of the associated size for each VLA type.
321 // We track this by the size expression rather than the type itself because
322 // in certain situations, like a const qualifier applied to an VLA typedef,
323 // multiple VLA types can share the same size expression.
324 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
325 // enter/leave scopes.
326 llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
327
328public:
329 /// A non-RAII class containing all the information about a bound
330 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
331 /// this which makes individual mappings very simple; using this
332 /// class directly is useful when you have a variable number of
333 /// opaque values or don't want the RAII functionality for some
334 /// reason.
335 class OpaqueValueMappingData {
336 const OpaqueValueExpr *opaqueValue;
337 bool boundLValue;
338
339 OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
340 : opaqueValue(ov), boundLValue(boundLValue) {}
341
342 public:
343 OpaqueValueMappingData() : opaqueValue(nullptr) {}
344
345 static bool shouldBindAsLValue(const Expr *expr) {
346 // gl-values should be bound as l-values for obvious reasons.
347 // Records should be bound as l-values because IR generation
348 // always keeps them in memory. Expressions of function type
349 // act exactly like l-values but are formally required to be
350 // r-values in C.
351 return expr->isGLValue() || expr->getType()->isFunctionType() ||
353 }
354
356 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
357 if (shouldBindAsLValue(ov))
358 return bind(cgf, ov, cgf.emitLValue(e));
359 return bind(cgf, ov, cgf.emitAnyExpr(e));
360 }
361
363 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
364 assert(shouldBindAsLValue(ov));
365 cgf.opaqueLValues.insert(std::make_pair(ov, lv));
366 return OpaqueValueMappingData(ov, true);
367 }
368
370 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
371 assert(!shouldBindAsLValue(ov));
372 cgf.opaqueRValues.insert(std::make_pair(ov, rv));
373
374 OpaqueValueMappingData data(ov, false);
375
376 // Work around an extremely aggressive peephole optimization in
377 // EmitScalarConversion which assumes that all other uses of a
378 // value are extant.
380 return data;
381 }
382
383 bool isValid() const { return opaqueValue != nullptr; }
384 void clear() { opaqueValue = nullptr; }
385
387 assert(opaqueValue && "no data to unbind!");
388
389 if (boundLValue) {
390 cgf.opaqueLValues.erase(opaqueValue);
391 } else {
392 cgf.opaqueRValues.erase(opaqueValue);
394 }
395 }
396 };
397
398 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
400 CIRGenFunction &cgf;
402
403 public:
407
408 /// Build the opaque value mapping for the given conditional
409 /// operator if it's the GNU ?: extension. This is a common
410 /// enough pattern that the convenience operator is really
411 /// helpful.
412 ///
415 : cgf(cgf) {
416 if (mlir::isa<ConditionalOperator>(op))
417 // Leave Data empty.
418 return;
419
421 mlir::cast<BinaryConditionalOperator>(op);
423 e->getCommon());
424 }
425
426 /// Build the opaque value mapping for an OpaqueValueExpr whose source
427 /// expression is set to the expression the OVE represents.
429 : cgf(cgf) {
430 if (ov) {
431 assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
432 "for OVE with no source expression");
433 data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
434 }
435 }
436
438 LValue lvalue)
439 : cgf(cgf),
440 data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}
441
443 RValue rvalue)
444 : cgf(cgf),
445 data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}
446
447 void pop() {
448 data.unbind(cgf);
449 data.clear();
450 }
451
453 if (data.isValid())
454 data.unbind(cgf);
455 }
456 };
457
458private:
459 /// Declare a variable in the current scope, return success if the variable
460 /// wasn't declared yet.
461 void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
462 mlir::Location loc, clang::CharUnits alignment,
463 bool isParam = false);
464
465public:
466 mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);
467
468 void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);
469
470private:
471 // Track current variable initialization (if there's one)
472 const clang::VarDecl *currVarDecl = nullptr;
473 class VarDeclContext {
475 const clang::VarDecl *oldVal = nullptr;
476
477 public:
478 VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) {
479 if (p.currVarDecl)
480 oldVal = p.currVarDecl;
481 p.currVarDecl = value;
482 }
483
484 /// Can be used to restore the state early, before the dtor
485 /// is run.
486 void restore() { p.currVarDecl = oldVal; }
487 ~VarDeclContext() { restore(); }
488 };
489
490public:
491 /// Use to track source locations across nested visitor traversals.
492 /// Always use a `SourceLocRAIIObject` to change currSrcLoc.
493 std::optional<mlir::Location> currSrcLoc;
495 CIRGenFunction &cgf;
496 std::optional<mlir::Location> oldLoc;
497
498 public:
499 SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value) : cgf(cgf) {
500 if (cgf.currSrcLoc)
501 oldLoc = cgf.currSrcLoc;
502 cgf.currSrcLoc = value;
503 }
504
505 /// Can be used to restore the state early, before the dtor
506 /// is run.
507 void restore() { cgf.currSrcLoc = oldLoc; }
509 };
510
512 llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
513
514 /// Hold counters for incrementally naming temporaries
515 unsigned counterRefTmp = 0;
516 unsigned counterAggTmp = 0;
517 std::string getCounterRefTmpAsString();
518 std::string getCounterAggTmpAsString();
519
520 /// Helpers to convert Clang's SourceLocation to a MLIR Location.
521 mlir::Location getLoc(clang::SourceLocation srcLoc);
522 mlir::Location getLoc(clang::SourceRange srcLoc);
523 mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
524
/// The language options of the translation unit being compiled.
const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
526
527 /// True if an insertion point is defined. If not, this indicates that the
528 /// current code being emitted is unreachable.
529 /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
530 /// since we don't yet force null insertion point to designate behavior (like
531 /// LLVM's codegen does) and we probably shouldn't.
532 bool haveInsertPoint() const {
533 return builder.getInsertionBlock() != nullptr;
534 }
535
536 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
537 // an ObjCMethodDecl.
539 llvm::PointerUnion<const clang::FunctionProtoType *,
540 const clang::ObjCMethodDecl *>
542
545 };
546
548
549 /// An abstract representation of regular/ObjC call/message targets.
551 /// The function declaration of the callee.
552 [[maybe_unused]] const clang::Decl *calleeDecl;
553
554 public:
555 AbstractCallee() : calleeDecl(nullptr) {}
556 AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {}
557
558 bool hasFunctionDecl() const {
559 return llvm::isa_and_nonnull<clang::FunctionDecl>(calleeDecl);
560 }
561
562 const clang::Decl *getDecl() const { return calleeDecl; }
563
564 unsigned getNumParams() const {
565 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
566 return fd->getNumParams();
567 return llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_size();
568 }
569
570 const clang::ParmVarDecl *getParamDecl(unsigned I) const {
571 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
572 return fd->getParamDecl(I);
573 return *(llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_begin() +
574 I);
575 }
576 };
577
578 struct VlaSizePair {
579 mlir::Value numElts;
581
582 VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
583 };
584
585 /// Return the number of elements for a single dimension
586 /// for the given array type.
587 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
588
589 /// Returns an MLIR::Value+QualType pair that corresponds to the size,
590 /// in non-variably-sized elements, of a variable length array type,
591 /// plus that largest non-variably-sized element type. Assumes that
592 /// the type has already been emitted with emitVariablyModifiedType.
593 VlaSizePair getVLASize(const VariableArrayType *type);
594 VlaSizePair getVLASize(QualType type);
595
597
/// Return the base pointer of \p addr after converting it to its natural
/// address form for \p pointeeType (see getAsNaturalAddressOf).
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType) {
  return getAsNaturalAddressOf(addr, pointeeType).getBasePointer();
}
601
602 void finishFunction(SourceLocation endLoc);
603
604 /// Determine whether the given initializer is trivial in the sense
605 /// that it requires no code to be generated.
606 bool isTrivialInitializer(const Expr *init);
607
608 /// If the specified expression does not fold to a constant, or if it does but
609 /// contains a label, return false. If it constant folds return true and set
610 /// the boolean result in Result.
611 bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
612 bool allowLabels = false);
614 llvm::APSInt &resultInt,
615 bool allowLabels = false);
616
617 /// Return true if the statement contains a label in it. If
618 /// this statement is not executed normally, it not containing a label means
619 /// that we can just remove the code.
620 bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);
621
622 Address emitExtVectorElementLValue(LValue lv, mlir::Location loc);
623
624 class ConstantEmission {
625 // Cannot use mlir::TypedAttr directly here because of bit availability.
626 llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference;
627 ConstantEmission(mlir::TypedAttr c, bool isReference)
628 : valueAndIsReference(c, isReference) {}
629
630 public:
632 static ConstantEmission forReference(mlir::TypedAttr c) {
633 return ConstantEmission(c, true);
634 }
635 static ConstantEmission forValue(mlir::TypedAttr c) {
636 return ConstantEmission(c, false);
637 }
638
639 explicit operator bool() const {
640 return valueAndIsReference.getOpaqueValue() != nullptr;
641 }
642
643 bool isReference() const { return valueAndIsReference.getInt(); }
645 assert(isReference());
646 cgf.cgm.errorNYI(refExpr->getSourceRange(),
647 "ConstantEmission::getReferenceLValue");
648 return {};
649 }
650
651 mlir::TypedAttr getValue() const {
652 assert(!isReference());
653 return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer());
654 }
655 };
656
657 ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
658 ConstantEmission tryEmitAsConstant(const MemberExpr *me);
659
662 /// The address of the alloca for languages with explicit address space
663 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
664 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
665 /// as a global constant.
667
668 /// True if the variable is of aggregate type and has a constant
669 /// initializer.
671
672 /// True if the variable is a __block variable that is captured by an
673 /// escaping block.
674 bool isEscapingByRef = false;
675
676 /// True if the variable was emitted as an offload recipe, and thus doesn't
677 /// have the same sort of alloca initialization.
678 bool emittedAsOffload = false;
679
680 mlir::Value nrvoFlag{};
681
682 struct Invalid {};
684
687
689
690 bool wasEmittedAsGlobal() const { return !addr.isValid(); }
691
693
694 /// Returns the raw, allocated address, which is not necessarily
695 /// the address of the object itself. It is casted to default
696 /// address space for address space agnostic languages.
697 Address getAllocatedAddress() const { return addr; }
698
699 // Changes the stored address for the emission. This function should only
700 // be used in extreme cases, and isn't required to model normal AST
701 // initialization/variables.
703
704 /// Returns the address of the object within this declaration.
705 /// Note that this does not chase the forwarding pointer for
706 /// __block decls.
708 if (!isEscapingByRef)
709 return addr;
710
712 return Address::invalid();
713 }
714 };
715
716 /// IndirectBranch - The first time an indirect goto is seen we create a block
717 /// reserved for the indirect branch. Unlike before, the actual 'indirectbr'
718 /// is emitted at the end of the function, once all block destinations have
719 /// been resolved.
720 mlir::Block *indirectGotoBlock = nullptr;
721
724
725 /// Perform the usual unary conversions on the specified expression and
726 /// compare the result against zero, returning an Int1Ty value.
727 mlir::Value evaluateExprAsBool(const clang::Expr *e);
728
729 cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d,
730 cir::GlobalOp gv,
731 cir::GetGlobalOp gvAddr);
732
733 /// Enter the cleanups necessary to complete the given phase of destruction
734 /// for a destructor. The end result should call destructors on members and
735 /// base classes in reverse order of their construction.
737
738 /// Determines whether an EH cleanup is required to destroy a type
739 /// with the given destruction kind.
740 /// TODO(cir): could be shared with Clang LLVM codegen
742 switch (kind) {
744 return false;
748 return getLangOpts().Exceptions;
750 return getLangOpts().Exceptions &&
751 cgm.getCodeGenOpts().ObjCAutoRefCountExceptions;
752 }
753 llvm_unreachable("bad destruction kind");
754 }
755
759
761
762 /// Set the address of a local variable.
764 assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
765 localDeclMap.insert({vd, addr});
766
767 // Add to the symbol table if not there already.
768 if (symbolTable.count(vd))
769 return;
770 symbolTable.insert(vd, addr.getPointer());
771 }
772
773 // Replaces the address of the local variable, if it exists. Else does the
774 // same thing as setAddrOfLocalVar.
776 localDeclMap.insert_or_assign(vd, addr);
777 }
778
779 // A class to allow reverting changes to a var-decl's registration to the
780 // localDeclMap. This is used in cases where things are being inserted into
781 // the variable list but don't follow normal lookup/search rules, like in
782 // OpenACC recipe generation.
784 CIRGenFunction &cgf;
785 const VarDecl *vd;
786 bool shouldDelete = false;
787 Address oldAddr = Address::invalid();
788
789 public:
791 : cgf(cgf), vd(vd) {
792 auto mapItr = cgf.localDeclMap.find(vd);
793
794 if (mapItr != cgf.localDeclMap.end())
795 oldAddr = mapItr->second;
796 else
797 shouldDelete = true;
798 }
799
801 if (shouldDelete)
802 cgf.localDeclMap.erase(vd);
803 else
804 cgf.localDeclMap.insert_or_assign(vd, oldAddr);
805 }
806 };
807
809
812
813 static bool
815
822
825
829 const clang::CXXRecordDecl *nearestVBase,
830 clang::CharUnits offsetFromNearestVBase,
831 bool baseIsNonVirtualPrimaryBase,
832 const clang::CXXRecordDecl *vtableClass,
833 VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
834 /// Return the Value of the vtable pointer member pointed to by thisAddr.
835 mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
836 const clang::CXXRecordDecl *vtableClass);
837
838 /// Returns whether we should perform a type checked load when loading a
839 /// virtual function for virtual calls to members of RD. This is generally
840 /// true when both vcall CFI and whole-program-vtables are enabled.
842
843 /// Source location information about the default argument or member
844 /// initializer expression we're evaluating, if any.
848
849 /// A scope within which we are constructing the fields of an object which
850 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
851 /// we need to evaluate the CXXDefaultInitExpr within the evaluation.
853 public:
855 : cgf(cgf), oldCXXDefaultInitExprThis(cgf.cxxDefaultInitExprThis) {
856 cgf.cxxDefaultInitExprThis = thisAddr;
857 }
859 cgf.cxxDefaultInitExprThis = oldCXXDefaultInitExprThis;
860 }
861
862 private:
863 CIRGenFunction &cgf;
864 Address oldCXXDefaultInitExprThis;
865 };
866
867 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
868 /// is overridden to be the object under construction.
870 public:
875 cgf.cxxThisValue = cgf.cxxDefaultInitExprThis.getPointer();
876 cgf.cxxThisAlignment = cgf.cxxDefaultInitExprThis.getAlignment();
877 }
879 cgf.cxxThisValue = oldCXXThisValue;
880 cgf.cxxThisAlignment = oldCXXThisAlignment;
881 }
882
883 public:
885 mlir::Value oldCXXThisValue;
888 };
889
894
895 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
896 /// current loop index is overridden. In order to encourage re-use of existing
897 /// array initialization, this uses a flag to determine if it is a 'no-op' or
898 /// not.
900 public:
901 ArrayInitLoopExprScope(CIRGenFunction &cgf, bool setIdx, mlir::Value index)
902 : cgf(cgf),
903 oldArrayInitIndex(setIdx
904 ? std::optional<mlir::Value>(cgf.arrayInitIndex)
905 : std::nullopt) {
906 if (setIdx)
907 cgf.arrayInitIndex = index;
908 }
910 if (oldArrayInitIndex.has_value())
911 cgf.arrayInitIndex = *oldArrayInitIndex;
912 }
913
914 private:
915 CIRGenFunction &cgf;
916 std::optional<mlir::Value> oldArrayInitIndex;
917 };
918
/// Get the index of the current ArrayInitLoopExpr, if any (null when no
/// enclosing ArrayInitLoopExprScope has set one — see arrayInitIndex).
mlir::Value getArrayInitIndex() { return arrayInitIndex; }
921
923 LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
924
925 /// Construct an address with the natural alignment of T. If a pointer to T
926 /// is expected to be signed, the pointer passed to this function must have
927 /// been signed, and the returned Address will have the pointer authentication
928 /// information needed to authenticate the signed pointer.
930 CharUnits alignment,
931 bool forPointeeType = false,
932 LValueBaseInfo *baseInfo = nullptr) {
933 if (alignment.isZero())
934 alignment = cgm.getNaturalTypeAlignment(t, baseInfo);
935 return Address(ptr, convertTypeForMem(t), alignment);
936 }
937
939 Address value, const CXXRecordDecl *derived,
940 llvm::iterator_range<CastExpr::path_const_iterator> path,
941 bool nullCheckValue, SourceLocation loc);
942
944 mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived,
945 llvm::iterator_range<CastExpr::path_const_iterator> path,
946 bool nullCheckValue);
947
948 /// Return the VTT parameter that should be passed to a base
949 /// constructor/destructor with virtual bases.
950 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
951 /// to ItaniumCXXABI.cpp together with all the references to VTT.
952 mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase,
953 bool delegating);
954
957 return makeAddrLValue(addr, ty, LValueBaseInfo(source));
958 }
959
961 return LValue::makeAddr(addr, ty, baseInfo);
962 }
963
964 void initializeVTablePointers(mlir::Location loc,
965 const clang::CXXRecordDecl *rd);
966 void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
967
969
970 /// Return the address of a local variable.
972 auto it = localDeclMap.find(vd);
973 assert(it != localDeclMap.end() &&
974 "Invalid argument to getAddrOfLocalVar(), no decl!");
975 return it->second;
976 }
977
979 mlir::Type fieldType, unsigned index);
980
981 /// Given an opaque value expression, return its LValue mapping if it exists,
982 /// otherwise create one.
984
985 /// Given an opaque value expression, return its RValue mapping if it exists,
986 /// otherwise create one.
988
/// Load the value for 'this'. This function is only valid while generating
/// code for a C++ member function.
/// FIXME(cir): this should return a mlir::Value!
/// NOTE(review): the FIXME above looks stale — the method already returns a
/// mlir::Value; confirm the original intent before removing it.
mlir::Value loadCXXThis() {
  assert(cxxThisValue && "no 'this' value for this function");
  return cxxThisValue;
}
997
998 /// Load the VTT parameter passed to base constructors/destructors that have
999 /// virtual bases. FIXME: Every place that calls LoadCXXVTT is something that needs to
1000 /// be abstracted properly.
1001 mlir::Value loadCXXVTT() {
1002 assert(cxxStructorImplicitParamValue && "no VTT value for this function");
1004 }
1005
1006 /// Convert the given pointer to a complete class to the given direct base.
1008 Address value,
1009 const CXXRecordDecl *derived,
1010 const CXXRecordDecl *base,
1011 bool baseIsVirtual);
1012
1013 /// Determine whether a return value slot may overlap some other object.
1015 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
1016 // class subobjects. These cases may need to be revisited depending on the
1017 // resolution of the relevant core issue.
1019 }
1020
1021 /// Determine whether a base class initialization may overlap some other
1022 /// object.
1024 const CXXRecordDecl *baseRD,
1025 bool isVirtual);
1026
1027 /// Get an appropriate 'undef' rvalue for the given type.
1028 /// TODO: What's the equivalent for MLIR? Currently we're only using this for
1029 /// void types so it just returns RValue::get(nullptr) but it'll need
1030 /// addressed later.
1032
1033 cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
1034 cir::FuncType funcType);
1035
1037 FunctionArgList &args);
1038
1039 /// Emit the function prologue: declare function arguments in the symbol
1040 /// table.
1041 void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB,
1042 const FunctionDecl *fd, SourceLocation bodyBeginLoc);
1043
1044 /// Emit code for the start of a function.
1045 /// \param loc The location to be associated with the function.
1046 /// \param startLoc The location of the function body.
1048 cir::FuncOp fn, cir::FuncType funcType,
1050 clang::SourceLocation startLoc);
1051
1052 /// returns true if aggregate type has a volatile member.
1054 if (const auto *rd = t->getAsRecordDecl())
1055 return rd->hasVolatileMember();
1056 return false;
1057 }
1058
1059 void addCatchHandlerAttr(const CXXCatchStmt *catchStmt,
1060 SmallVector<mlir::Attribute> &handlerAttrs);
1061
1062 /// The cleanup depth enclosing all the cleanups associated with the
1063 /// parameters.
1065
1067
1068 /// Takes the old cleanup stack size and emits the cleanup blocks
1069 /// that have been added.
1070 void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth,
1071 ArrayRef<mlir::Value *> valuesToReload = {});
1072
1073 /// Pops cleanup blocks until the given savepoint is reached, then adds the
1074 /// cleanups from the given savepoint in the lifetime-extended cleanups stack.
1075 void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth,
1076 size_t oldLifetimeExtendedSize,
1077 ArrayRef<mlir::Value *> valuesToReload = {});
1078 void popCleanupBlock(bool forDeactivation = false);
1079
1080 void terminateStructuredRegionBody(mlir::Region &r, mlir::Location loc);
1081
1082 /// Deactivates the given cleanup block. The block cannot be reactivated. Pops
1083 /// it if it's the top of the stack.
1084 ///
1085 /// \param DominatingIP - An instruction which is known to
1086 /// dominate the current IP (if set) and which lies along
1087 /// all paths of execution between the current IP and the
1088 /// the point at which the cleanup comes into scope.
1089 void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup,
1090 mlir::Operation *dominatingIP);
1091
1092 /// Create an active flag variable for use with conditional cleanups. The
1093 /// flag is initialized to false before the outermost conditional and set to
1094 /// true at the current insertion point (inside the conditional branch).
1095 Address createCleanupActiveFlag();
1096
1097 /// Promote a single pending cleanup entry onto the EH scope stack. If the
1098 /// entry has a valid activeFlag, the cleanup is configured as conditional.
1099 /// Defined in CIRGenDecl.cpp where the concrete cleanup types are visible.
1100 void pushPendingCleanupToEHStack(const PendingCleanupEntry &entry);
1101
1102 /// Push a cleanup to be run at the end of the current full-expression. Safe
1103 /// against the possibility that we're currently inside a
1104 /// conditionally-evaluated expression.
1105 template <class T, class... As>
1107 if (!isInConditionalBranch())
1108 return ehStack.pushCleanup<T>(kind, a...);
1109
1110 // Defer the cleanup until the FullExprCleanupScope exits. We can't push
1111 // to the EH stack now because the ternary's inner LexicalScope would pop
1112 // it prematurely.
1113 Address activeFlag = createCleanupActiveFlag();
1115 PendingCleanupEntry{kind, a..., activeFlag});
1116 }
1117
1118 /// Push a cleanup and record it for deferred deactivation. The cleanup will
1119 /// be deactivated when the enclosing CleanupDeactivationScope exits.
1120 template <class T, class... As>
1122 mlir::Location loc = builder.getUnknownLoc();
1123 mlir::Operation *dominatingIP = builder.getBool(false, loc).getOperation();
1124 ehStack.pushCleanup<T>(kind, a...);
1126 {ehStack.stable_begin(), dominatingIP});
1127 }
1128
1130 Address addr, QualType type);
1132 QualType type, Destroyer *destroyer,
1133 bool useEHCleanupForArray);
1134
1135 /// Queue a cleanup to be pushed after finishing the current full-expression.
1136 /// When the enclosing RunCleanupsScope exits, popCleanupBlocks promotes these
1137 /// entries onto the EH scope stack for the enclosing scope.
1139 Destroyer *destroyer) {
1140 lifetimeExtendedCleanupStack.push_back({kind, addr, type, destroyer});
1141 }
1142
1143 /// Enters a new scope for capturing cleanups, all of which
1144 /// will be executed once the scope is exited.
1145 class RunCleanupsScope {
1146 EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
1147 size_t lifetimeExtendedCleanupStackSize;
1148 CleanupDeactivationScope deactivateCleanups;
1149
1150 protected:
1153
1154 private:
1155 RunCleanupsScope(const RunCleanupsScope &) = delete;
1156 void operator=(const RunCleanupsScope &) = delete;
1157
1158 protected:
1160
1161 public:
1162 /// Enter a new cleanup scope.
1164 : deactivateCleanups(cgf), performCleanup(true), cgf(cgf) {
1165 cleanupStackDepth = cgf.ehStack.stable_begin();
1166 lifetimeExtendedCleanupStackSize =
1167 cgf.lifetimeExtendedCleanupStack.size();
1168 oldDidCallStackSave = cgf.didCallStackSave;
1169 cgf.didCallStackSave = false;
1170 oldCleanupStackDepth = cgf.currentCleanupStackDepth;
1171 cgf.currentCleanupStackDepth = cleanupStackDepth;
1172 }
1173
1174 /// Exit this cleanup scope, emitting any accumulated cleanups.
1176 if (performCleanup)
1177 forceCleanup();
1178 }
1179
1180 /// Force the emission of cleanups now, instead of waiting
1181 /// until this object is destroyed.
1182 void forceCleanup(ArrayRef<mlir::Value *> valuesToReload = {}) {
1183 assert(performCleanup && "Already forced cleanup");
1185 deactivateCleanups.forceDeactivate();
1186 cgf.popCleanupBlocks(cleanupStackDepth, lifetimeExtendedCleanupStackSize,
1187 valuesToReload);
1188 performCleanup = false;
1189 cgf.currentCleanupStackDepth = oldCleanupStackDepth;
1190 }
1191
1192 /// Whether there are any pending cleanups that have been pushed since
1193 /// this scope was entered.
1194 bool hasPendingCleanups() const {
1195 return cgf.ehStack.stable_begin() != cleanupStackDepth;
1196 }
1197 };
1198
1199 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1201
1203 CIRGenFunction &cgf;
1204 RunCleanupsScope cleanups;
1205 cir::CleanupScopeOp scope;
1206 size_t deferredCleanupStackSize;
1207 bool exited = false;
1208
1209 public:
1210 FullExprCleanupScope(CIRGenFunction &cgf, const Expr *subExpr);
1211
1212 void exit(ArrayRef<mlir::Value *> valuesToReload = {});
1213
1215 if (!exited)
1216 exit();
1217 }
1218
1219 private:
1221 void operator=(const FullExprCleanupScope &) = delete;
1222 };
1223
1224public:
1225 /// Represents a scope, including function bodies, compound statements, and
1226 /// the substatements of if/while/do/for/switch/try statements. This class
1227 /// handles any automatic cleanup, along with the return value.
1228 struct LexicalScope : public RunCleanupsScope {
1229 private:
1230 // Points to the scope entry block. This is useful, for instance, for
1231 // helping to insert allocas before finalizing any recursive CodeGen from
1232 // switches.
1233 mlir::Block *entryBlock;
1234
1235 LexicalScope *parentScope = nullptr;
1236
1237 // Holds the actual value for ScopeKind::Try
1238 cir::TryOp tryOp = nullptr;
1239
1240 // On a coroutine body, the OnFallthrough sub stmt holds the handler
1241 // (CoreturnStmt) for control flow falling off the body. Keep track
1242 // of emitted co_return in this scope and allow OnFallthrough to be
1243 // skipeed.
1244 bool hasCoreturnStmt = false;
1245
1246 // Only Regular is used at the moment. Support for other kinds will be
1247 // added as the relevant statements/expressions are upstreamed.
1248 enum Kind {
1249 Regular, // cir.if, cir.scope, if_regions
1250 Ternary, // cir.ternary
1251 Switch, // cir.switch
1252 Try, // cir.try
1253 GlobalInit // cir.global initialization code
1254 };
1255 Kind scopeKind = Kind::Regular;
1256
1257 // The scope return value.
1258 mlir::Value retVal = nullptr;
1259
1260 mlir::Location beginLoc;
1261 mlir::Location endLoc;
1262
1263 public:
1264 unsigned depth = 0;
1265
1266 LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
1267 : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
1268 beginLoc(loc), endLoc(loc) {
1269
1270 assert(entryBlock && "LexicalScope requires an entry block");
1271 cgf.curLexScope = this;
1272 if (parentScope)
1273 ++depth;
1274
1275 if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
1276 assert(fusedLoc.getLocations().size() == 2 && "too many locations");
1277 beginLoc = fusedLoc.getLocations()[0];
1278 endLoc = fusedLoc.getLocations()[1];
1279 }
1280 }
1281
1282 void setRetVal(mlir::Value v) { retVal = v; }
1283
1284 void cleanup();
1285 void restore() { cgf.curLexScope = parentScope; }
1286
1289 cleanup();
1290 restore();
1291 }
1292
1293 // ---
1294 // Coroutine tracking
1295 // ---
1296 bool hasCoreturn() const { return hasCoreturnStmt; }
1297 void setCoreturn() { hasCoreturnStmt = true; }
1298
1299 // ---
1300 // Kind
1301 // ---
1302 bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
1303 bool isRegular() { return scopeKind == Kind::Regular; }
1304 bool isSwitch() { return scopeKind == Kind::Switch; }
1305 bool isTernary() { return scopeKind == Kind::Ternary; }
1306 bool isTry() { return scopeKind == Kind::Try; }
1307 cir::TryOp getClosestTryParent();
1308 void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
1309 void setAsSwitch() { scopeKind = Kind::Switch; }
1310 void setAsTernary() { scopeKind = Kind::Ternary; }
1311 void setAsTry(cir::TryOp op) {
1312 scopeKind = Kind::Try;
1313 tryOp = op;
1314 }
1315
1316 cir::TryOp getTry() {
1317 assert(isTry());
1318 return tryOp;
1319 }
1320
1321 // ---
1322 // Return handling.
1323 // ---
1324
1325 private:
1326 // On switches we need one return block per region, since cases don't
1327 // have their own scopes but are distinct regions nonetheless.
1328
1329 // TODO: This implementation should change once we have support for early
1330 // exits in MLIR structured control flow (llvm-project#161575)
1332 llvm::DenseMap<mlir::Block *, mlir::Location> retLocs;
1333 llvm::DenseMap<cir::CaseOp, unsigned> retBlockInCaseIndex;
1334 std::optional<unsigned> normalRetBlockIndex;
1335
1336 // There's usually only one ret block per scope, but this needs to be
1337 // get or create because of potential unreachable return statements, note
1338 // that for those, all source location maps to the first one found.
1339 mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1340 assert((isa_and_nonnull<cir::CaseOp>(
1341 cgf.builder.getBlock()->getParentOp()) ||
1342 retBlocks.size() == 0) &&
1343 "only switches can hold more than one ret block");
1344
1345 // Create the return block but don't hook it up just yet.
1346 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
1347 auto *b = cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
1348 retBlocks.push_back(b);
1349 updateRetLoc(b, loc);
1350 return b;
1351 }
1352
1353 cir::ReturnOp emitReturn(mlir::Location loc);
1354 void emitImplicitReturn();
1355
1356 public:
1358 mlir::Location getRetLoc(mlir::Block *b) { return retLocs.at(b); }
1359 void updateRetLoc(mlir::Block *b, mlir::Location loc) {
1360 retLocs.insert_or_assign(b, loc);
1361 }
1362
1363 mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1364 // Check if we're inside a case region
1365 if (auto caseOp = mlir::dyn_cast_if_present<cir::CaseOp>(
1366 cgf.builder.getBlock()->getParentOp())) {
1367 auto iter = retBlockInCaseIndex.find(caseOp);
1368 if (iter != retBlockInCaseIndex.end()) {
1369 // Reuse existing return block
1370 mlir::Block *ret = retBlocks[iter->second];
1371 updateRetLoc(ret, loc);
1372 return ret;
1373 }
1374 // Create new return block
1375 mlir::Block *ret = createRetBlock(cgf, loc);
1376 retBlockInCaseIndex[caseOp] = retBlocks.size() - 1;
1377 return ret;
1378 }
1379
1380 if (normalRetBlockIndex) {
1381 mlir::Block *ret = retBlocks[*normalRetBlockIndex];
1382 updateRetLoc(ret, loc);
1383 return ret;
1384 }
1385
1386 mlir::Block *ret = createRetBlock(cgf, loc);
1387 normalRetBlockIndex = retBlocks.size() - 1;
1388 return ret;
1389 }
1390
1391 mlir::Block *getEntryBlock() { return entryBlock; }
1392 };
1393
1395
1397
1399 QualType type);
1400
1401 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
1402 QualType type);
1403
1405 Destroyer *destroyer);
1406
1408 QualType type, Destroyer *destroyer,
1409 bool useEHCleanupForArray);
1410
1412
1413 void pushIrregularPartialArrayCleanup(mlir::Value arrayBegin,
1414 Address arrayEndPointer,
1415 QualType elementType,
1416 CharUnits elementAlign,
1417 Destroyer *destroyer);
1418
1419 /// Start generating a thunk function.
1420 void startThunk(cir::FuncOp fn, GlobalDecl gd,
1421 const CIRGenFunctionInfo &fnInfo, bool isUnprototyped);
1422
1423 /// Finish generating a thunk function.
1424 void finishThunk();
1425
1426 /// Generate code for a thunk function.
1427 void generateThunk(cir::FuncOp fn, const CIRGenFunctionInfo &fnInfo,
1428 GlobalDecl gd, const ThunkInfo &thunk,
1429 bool isUnprototyped);
1430
1431 /// ----------------------
1432 /// CIR emit functions
1433 /// ----------------------
1434public:
1435 bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr,
1437 clang::SVETypeFlags typeFlags);
1438 mlir::Value emitSVEPredicateCast(mlir::Value pred, unsigned minNumElts,
1439 mlir::Location loc);
1440 std::optional<mlir::Value>
1441 emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
1443 llvm::Triple::ArchType arch);
1444 std::optional<mlir::Value> emitAArch64SMEBuiltinExpr(unsigned builtinID,
1445 const CallExpr *expr);
1446 std::optional<mlir::Value> emitAArch64SVEBuiltinExpr(unsigned builtinID,
1447 const CallExpr *expr);
1448
1449 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
1450 SourceLocation loc,
1451 SourceLocation assumptionLoc,
1452 int64_t alignment,
1453 mlir::Value offsetValue = nullptr);
1454
1455 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
1456 SourceLocation assumptionLoc,
1457 int64_t alignment,
1458 mlir::Value offsetValue = nullptr);
1459
1460private:
1461 void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
1462 clang::CharUnits alignment);
1463
1464 CIRGenCallee emitDirectCallee(const GlobalDecl &gd);
1465
1466public:
1468 llvm::StringRef fieldName,
1469 unsigned fieldIndex);
1470
1471 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1472 mlir::Location loc, clang::CharUnits alignment,
1473 bool insertIntoFnEntryBlock,
1474 mlir::Value arraySize = nullptr);
1475 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1476 mlir::Location loc, clang::CharUnits alignment,
1477 mlir::OpBuilder::InsertPoint ip,
1478 mlir::Value arraySize = nullptr);
1479
1480 void emitAggregateStore(mlir::Value value, Address dest);
1481
1482 void emitAggExpr(const clang::Expr *e, AggValueSlot slot);
1483
1485
1487
1488 /// Emit an aggregate copy.
1489 ///
1490 /// \param isVolatile \c true iff either the source or the destination is
1491 /// volatile.
1492 /// \param MayOverlap Whether the tail padding of the destination might be
1493 /// occupied by some other object. More efficient code can often be
1494 /// generated if not.
1495 void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
1496 AggValueSlot::Overlap_t mayOverlap,
1497 bool isVolatile = false);
1498
1499 /// Emit code to compute the specified expression which can have any type. The
1500 /// result is returned as an RValue struct. If this is an aggregate
1501 /// expression, the aggloc/agglocvolatile arguments indicate where the result
1502 /// should be returned.
1505 bool ignoreResult = false);
1506
1507 /// Emits the code necessary to evaluate an arbitrary expression into the
1508 /// given memory location.
1509 void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals,
1510 bool isInitializer);
1511
1512 /// Similarly to emitAnyExpr(), however, the result will always be accessible
1513 /// even if no aggregate location is provided.
1515
1516 void emitAnyExprToExn(const Expr *e, Address addr);
1517
1518 void emitArrayDestroy(mlir::Value begin, mlir::Value numElements,
1519 QualType elementType, CharUnits elementAlign,
1520 Destroyer *destroyer);
1521
1522 mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
1523 QualType &baseType, Address &addr);
1526
1528
1530 LValueBaseInfo *baseInfo = nullptr);
1531
1532 std::pair<mlir::Value, mlir::Type>
1534 QualType inputType, std::string &constraintString,
1535 SourceLocation loc);
1536 std::pair<mlir::Value, mlir::Type>
1537 emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr,
1538 std::string &constraintString);
1539 mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
1540
1542 void emitAtomicInit(Expr *init, LValue dest);
1543 void emitAtomicStore(RValue rvalue, LValue dest, bool isInit);
1544 void emitAtomicStore(RValue rvalue, LValue dest, cir::MemOrder order,
1545 bool isVolatile, bool isInit);
1547 const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
1548 llvm::function_ref<void(cir::MemOrder)> emitAtomicOp);
1549
1550 mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s);
1551
1552 AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
1553 mlir::OpBuilder::InsertPoint ip = {});
1554
1556
1557 /// Emit code and set up symbol table for a variable declaration with auto,
1558 /// register, or no storage class specifier. These turn into simple stack
1559 /// objects, globals depending on target.
1560 void emitAutoVarDecl(const clang::VarDecl &d);
1561
1562 void emitAutoVarCleanups(const AutoVarEmission &emission);
1563 /// Emit the initializer for an allocated variable. If this call is not
1564 /// associated with the call to emitAutoVarAlloca (as the address of the
1565 /// emission is not directly an alloca), the allocatedSeparately parameter can
1566 /// be used to suppress the assertions. However, this should only be used in
1567 /// extreme cases, as it doesn't properly reflect the language/AST.
1568 void emitAutoVarInit(const AutoVarEmission &emission);
1569 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1571
1572 void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
1573
1574 void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
1575 CXXCtorInitializer *baseInit);
1576
1578
1579 mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
1580
1581 RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
1582 const clang::CallExpr *e, ReturnValueSlot returnValue);
1583
1584 /// Returns a Value corresponding to the size of the given expression by
1585 /// emitting a `cir.objsize` operation.
1586 ///
1587 /// \param e The expression whose object size to compute
1588 /// \param type Determines the semantics of the object size computation.
1589 /// The type parameter is a 2-bit value where:
1590 /// bit 0 (type & 1): 0 = whole object, 1 = closest subobject
1591 /// bit 1 (type & 2): 0 = maximum size, 2 = minimum size
1592 /// \param resType The result type for the size value
1593 /// \param emittedE Optional pre-emitted pointer value. If non-null, we'll
1594 /// call `cir.objsize` on this value rather than emitting e.
1595 /// \param isDynamic If true, allows runtime evaluation via dynamic mode
1596 mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type,
1597 cir::IntType resType, mlir::Value emittedE,
1598 bool isDynamic);
1599
1600 mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e,
1601 unsigned type,
1602 cir::IntType resType,
1603 mlir::Value emittedE,
1604 bool isDynamic);
1605
1606 int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts);
1607
1609
1610 RValue emitCall(const CIRGenFunctionInfo &funcInfo,
1611 const CIRGenCallee &callee, ReturnValueSlot returnValue,
1612 const CallArgList &args, cir::CIRCallOpInterface *callOp,
1613 mlir::Location loc);
1616 const CallArgList &args,
1617 cir::CIRCallOpInterface *callOrTryCall = nullptr) {
1618 assert(currSrcLoc && "source location must have been set");
1619 return emitCall(funcInfo, callee, returnValue, args, callOrTryCall,
1620 *currSrcLoc);
1621 }
1622
1623 RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
1625
1626 /// Emit the call and return for a thunk function.
1627 void emitCallAndReturnForThunk(cir::FuncOp callee, const ThunkInfo *thunk,
1628 bool isUnprototyped);
1629
1630 void emitCallArg(CallArgList &args, const clang::Expr *e,
1631 clang::QualType argType);
1632 void emitCallArgs(
1633 CallArgList &args, PrototypeWrapper prototype,
1634 llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
1635 AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
1640
1641 template <typename T>
1642 mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
1643 mlir::ArrayAttr value,
1644 cir::CaseOpKind kind,
1645 bool buildingTopLevelCase);
1646
1648
1649 mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
1650 mlir::Type condType,
1651 bool buildingTopLevelCase);
1652
1653 LValue emitCastLValue(const CastExpr *e);
1654
1655 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
1656 /// sanitizer is enabled, a runtime check is also emitted.
1657 mlir::Value emitCheckedArgForAssume(const Expr *e);
1658
1659 /// Emit a conversion from the specified complex type to the specified
1660 /// destination type, where the destination type is an LLVM scalar type.
1661 mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
1662 QualType dstTy, SourceLocation loc);
1663
1666
1668
1669 mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
1670 cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1671 cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1672 cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
1673 cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
1674 mlir::Value coroframeAddr);
1676
1677 void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
1678
1680
1681 mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
1682
1683 mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s);
1684
1686 AggValueSlot dest);
1687
1690 Address arrayBegin, const CXXConstructExpr *e,
1691 bool newPointerIsChecked,
1692 bool zeroInitialize = false);
1694 mlir::Value numElements, Address arrayBase,
1695 const CXXConstructExpr *e,
1696 bool newPointerIsChecked,
1697 bool zeroInitialize);
1699 clang::CXXCtorType type, bool forVirtualBase,
1700 bool delegating, AggValueSlot thisAVS,
1701 const clang::CXXConstructExpr *e);
1702
1704 clang::CXXCtorType type, bool forVirtualBase,
1705 bool delegating, Address thisAddr,
1707
1709 bool forVirtualBase, Address thisAddr,
1710 bool inheritedFromVBase,
1711 const CXXInheritedCtorInitExpr *e);
1712
1714 SourceLocation loc, const CXXConstructorDecl *d, CXXCtorType ctorType,
1715 bool forVirtualBase, bool delegating, CallArgList &args);
1716
1717 void emitCXXDeleteExpr(const CXXDeleteExpr *e);
1718
1720 bool forVirtualBase, bool delegating,
1721 Address thisAddr, QualType thisTy);
1722
1724 mlir::Value thisVal, QualType thisTy,
1725 mlir::Value implicitParam,
1726 QualType implicitParamTy, const CallExpr *e);
1727
1728 mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
1730
1733
1735 const Expr *e, Address base, mlir::Value memberPtr,
1736 const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo);
1737
1739 const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
1740 ReturnValueSlot returnValue, mlir::Value thisPtr,
1741 mlir::Value implicitParam, clang::QualType implicitParamTy,
1742 const clang::CallExpr *ce, CallArgList *rtlArgs);
1743
1745 const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
1746 ReturnValueSlot returnValue, bool hasQualifier,
1747 clang::NestedNameSpecifier qualifier, bool isArrow,
1748 const clang::Expr *base);
1749
1752
1753 mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
1754
1755 void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
1756 mlir::Type elementTy, Address beginPtr,
1757 mlir::Value numElements,
1758 mlir::Value allocSizeWithoutCookie);
1759
1760 /// Create a check for a function parameter that may potentially be
1761 /// declared as non-null.
1762 void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc,
1763 AbstractCallee ac, unsigned paramNum);
1764
1766 const CXXMethodDecl *md,
1768
1771
1773
1775 const CallExpr *callExpr,
1777
1778 void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType,
1779 Address ptr);
1780
1781 void emitCXXThrowExpr(const CXXThrowExpr *e);
1782
1784 virtual mlir::LogicalResult operator()(CIRGenFunction &cgf) = 0;
1785 virtual ~cxxTryBodyEmitter() = default;
1786 };
1787
1788 mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s,
1789 cxxTryBodyEmitter &bodyCallback);
1790 mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
1791
1793 clang::CXXCtorType ctorType, FunctionArgList &args);
1794
1795 // It's important not to confuse this and emitDelegateCXXConstructorCall.
1796 // Delegating constructors are the C++11 feature. The constructor delegate
1797 // optimization is used to reduce duplication in the base and complete
1798 // constructors where they are substantially the same.
1800 const FunctionArgList &args);
1801
1802 void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr,
1803 QualType deleteTy);
1804
1805 mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
1806
1807 mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e);
1808 mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
1809
1810 /// Emit an expression as an initializer for an object (variable, field, etc.)
1811 /// at the given location. The expression is not necessarily the normal
1812 /// initializer for the object, and the address is not necessarily
1813 /// its normal location.
1814 ///
1815 /// \param init the initializing expression
1816 /// \param d the object to act as if we're initializing
1817 /// \param lvalue the lvalue to initialize
1818 /// \param capturedByInit true if \p d is a __block variable whose address is
1819 /// potentially changed by the initializer
1820 void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
1821 LValue lvalue, bool capturedByInit = false);
1822
1823 mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
1824
1825 mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
1826
1827 mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s);
1828
1830
1832 clang::Expr *init);
1833
1835
1836 mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);
1837
1838 mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
1839
1840 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
1841
1842 void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty);
1843
1844 mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee,
1846 mlir::NamedAttrList attrs = {});
1847
1848 void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc);
1849
1850 /// Emit the computation of the specified expression of scalar type.
1851 mlir::Value emitScalarExpr(const clang::Expr *e,
1852 bool ignoreResultAssign = false);
1853
1854 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv);
1855
1856 /// Build a debug stoppoint if we are emitting debug info.
1857 void emitStopPoint(const Stmt *s);
1858
1859 // Build CIR for a statement. useCurrentScope should be true if no
1860 // new scopes need be created when finding a compound statement.
1861 mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope,
1862 llvm::ArrayRef<const Attr *> attrs = {});
1863
1864 mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
1865 bool useCurrentScope);
1866
1867 mlir::LogicalResult emitForStmt(const clang::ForStmt &s);
1868
1869 void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator,
1870 CallArgList &callArgs);
1871
1872 RValue emitCoawaitExpr(const CoawaitExpr &e,
1873 AggValueSlot aggSlot = AggValueSlot::ignored(),
1874 bool ignoreResult = false);
1875
1876 RValue emitCoyieldExpr(const CoyieldExpr &e,
1877 AggValueSlot aggSlot = AggValueSlot::ignored(),
1878 bool ignoreResult = false);
1879 /// Emit the computation of the specified expression of complex type,
1880 /// returning the result.
1881 mlir::Value emitComplexExpr(const Expr *e);
1882
1883 void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit);
1884
1885 mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv);
1886
1887 LValue emitComplexAssignmentLValue(const BinaryOperator *e);
1888 LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
1889 LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e,
1890 mlir::Value &result);
1891
1892 mlir::LogicalResult
1893 emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
1894 AggValueSlot slot = AggValueSlot::ignored());
1895
1896 mlir::LogicalResult
1898 Address *lastValue = nullptr,
1899 AggValueSlot slot = AggValueSlot::ignored());
1900
1901 void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
1902 mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
1903 LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
1904
1905 mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
1906 mlir::Type condType,
1907 bool buildingTopLevelCase);
1908
1910 clang::CXXCtorType ctorType,
1911 const FunctionArgList &args,
1913
1914 /// We are performing a delegate call; that is, the current function is
1915 /// delegating to another one. Produce a r-value suitable for passing the
1916 /// given parameter.
1917 void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
1919
1920 /// Emit an `if` on a boolean condition to the specified blocks.
1921 /// FIXME: Based on the condition, this might try to simplify the codegen of
1922 /// the conditional based on the branch.
1923 /// In the future, we may apply code generation simplifications here,
1924 /// similar to those used in classic LLVM codegen
1925 /// See `EmitBranchOnBoolExpr` for inspiration.
1926 mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
1927 const clang::Stmt *thenS,
1928 const clang::Stmt *elseS);
1929 cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
1930 BuilderCallbackRef thenBuilder,
1931 mlir::Location thenLoc,
1932 BuilderCallbackRef elseBuilder,
1933 std::optional<mlir::Location> elseLoc = {});
1934
1935 mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
1936
1937 LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e);
1938
1939 mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
1940 mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
1941
1942 void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md);
1943 void emitLambdaStaticInvokeBody(const CXXMethodDecl *md);
1944
1945 mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
1946
1947 /// Emit code to compute the specified expression,
1948 /// ignoring the result.
1949 void emitIgnoredExpr(const clang::Expr *e);
1950
1951 RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc);
1952
1953 /// Load a complex number from the specified l-value.
1954 mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc);
1955
1956 RValue emitLoadOfExtVectorElementLValue(LValue lv);
1957
1958 /// Given an expression that represents a value lvalue, this method emits
1959 /// the address of the lvalue, then loads the result as an rvalue,
1960 /// returning the rvalue.
1961 RValue emitLoadOfLValue(LValue lv, SourceLocation loc);
1962
1963 Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
1964 LValueBaseInfo *pointeeBaseInfo);
1965 LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
1966 QualType refTy, AlignmentSource source);
1967
1968 /// EmitLoadOfScalar - Load a scalar value from an address, taking
1969 /// care to appropriately convert from the memory representation to
1970 /// the LLVM value representation. The l-value must be a simple
1971 /// l-value.
1972 mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);
1973 mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, QualType ty,
1974 SourceLocation loc, LValueBaseInfo baseInfo);
1975
1976 /// Emit code to compute a designator that specifies the location
1977 /// of the expression.
1978 /// FIXME: document this function better.
1979 LValue emitLValue(const clang::Expr *e);
1980 LValue emitLValueForBitField(LValue base, const FieldDecl *field);
1981 LValue emitLValueForField(LValue base, const clang::FieldDecl *field);
1982
1983 LValue emitLValueForLambdaField(const FieldDecl *field);
1984 LValue emitLValueForLambdaField(const FieldDecl *field,
1985 mlir::Value thisValue);
1986
 1987 /// Like emitLValueForField, except that if the Field is a reference, this
1988 /// will return the address of the reference and not the address of the value
1989 /// stored in the reference.
1990 LValue emitLValueForFieldInitialization(LValue base,
1991 const clang::FieldDecl *field,
1992 llvm::StringRef fieldName);
1993
1994 LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
1995
1996 LValue emitMemberExpr(const MemberExpr *e);
1997
1998 /// Emit a musttail call for a thunk with a potentially different ABI.
1999 void emitMustTailThunk(GlobalDecl gd, mlir::Value adjustedThisPtr,
2000 cir::FuncOp callee);
2001
2002 /// Emit a call to an AMDGPU builtin function.
2003 std::optional<mlir::Value> emitAMDGPUBuiltinExpr(unsigned builtinID,
2004 const CallExpr *expr);
2005
2006 LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
2007
2008 LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
2009
2010 /// Given an expression with a pointer type, emit the value and compute our
2011 /// best estimate of the alignment of the pointee.
2012 ///
2013 /// One reasonable way to use this information is when there's a language
2014 /// guarantee that the pointer must be aligned to some stricter value, and
2015 /// we're simply trying to ensure that sufficiently obvious uses of under-
2016 /// aligned objects don't get miscompiled; for example, a placement new
2017 /// into the address of a local variable. In such a case, it's quite
2018 /// reasonable to just ignore the returned alignment when it isn't from an
2019 /// explicit source.
2020 Address emitPointerWithAlignment(const clang::Expr *expr,
2021 LValueBaseInfo *baseInfo = nullptr);
2022
2023 /// Emits a reference binding to the passed in expression.
2024 RValue emitReferenceBindingToExpr(const Expr *e);
2025
2026 mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);
2027
2028 RValue emitRotate(const CallExpr *e, bool isRotateLeft);
2029
2030 mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);
2031
2032 /// Emit a conversion from the specified type to the specified destination
2033 /// type, both of which are CIR scalar types.
2034 mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
2035 clang::QualType dstType,
2036 clang::SourceLocation loc);
2037
2038 void emitScalarInit(const clang::Expr *init, mlir::Location loc,
2039 LValue lvalue, bool capturedByInit = false);
2040
2041 mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx,
2042 const Expr *argExpr);
2043
2044 void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage);
2045
2046 /// Emit a guarded initializer for a static local variable or a static
2047 /// data member of a class template instantiation.
2048 void emitCXXGuardedInit(const VarDecl &varDecl, cir::GlobalOp globalOp,
2049 bool performInit);
2050
2051 void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest,
2052 bool isInit);
2053
2054 void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
2055 clang::QualType ty, LValueBaseInfo baseInfo,
2056 bool isInit = false, bool isNontemporal = false);
2057 void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
2058
2059 /// Store the specified rvalue into the specified
 2060 /// lvalue, where both are guaranteed to have the same type, and that type
2061 /// is 'Ty'.
2062 void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);
2063
2064 mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);
2065
2066 LValue emitStringLiteralLValue(const StringLiteral *e,
2067 llvm::StringRef name = ".str");
2068
2069 mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
2070 mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
2071 bool buildingTopLevelCase);
2072 mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
2073
2074 std::optional<mlir::Value>
2075 emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e,
2076 ReturnValueSlot &returnValue);
2077
2078 /// Given a value and its clang type, returns the value casted to its memory
2079 /// representation.
2080 /// Note: CIR defers most of the special casting to the final lowering passes
2081 /// to conserve the high level information.
2082 mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);
2083
2084 /// EmitFromMemory - Change a scalar value from its memory
2085 /// representation to its value representation.
2086 mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty);
2087
2088 /// Emit a trap instruction, which is used to abort the program in an abnormal
2089 /// way, usually for debugging purposes.
2090 /// \p createNewBlock indicates whether to create a new block for the IR
2091 /// builder. Since the `cir.trap` operation is a terminator, operations that
2092 /// follow a trap cannot be emitted after `cir.trap` in the same block. To
2093 /// ensure these operations get emitted successfully, you need to create a new
2094 /// dummy block and set the insertion point there before continuing from the
2095 /// trap operation.
2096 void emitTrap(mlir::Location loc, bool createNewBlock);
2097
2098 LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
2099
2100 mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
2101
2102 /// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
2103 /// checking is enabled. Otherwise, just emit an unreachable instruction.
2104 /// \p createNewBlock indicates whether to create a new block for the IR
2105 /// builder. Since the `cir.unreachable` operation is a terminator, operations
2106 /// that follow an unreachable point cannot be emitted after `cir.unreachable`
2107 /// in the same block. To ensure these operations get emitted successfully,
2108 /// you need to create a dummy block and set the insertion point there before
2109 /// continuing from the unreachable point.
2110 void emitUnreachable(clang::SourceLocation loc, bool createNewBlock);
2111
2112 /// This method handles emission of any variable declaration
2113 /// inside a function, including static vars etc.
2114 void emitVarDecl(const clang::VarDecl &d);
2115
2116 void emitVariablyModifiedType(QualType ty);
2117
2118 mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
2119
2120 std::optional<mlir::Value> emitRISCVBuiltinExpr(unsigned builtinID,
2121 const CallExpr *expr);
2122
2123 std::optional<mlir::Value> emitX86BuiltinExpr(unsigned builtinID,
2124 const CallExpr *expr);
2125
2126 /// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
 2127 /// nonnull, if \p lhs is marked _Nonnull.
2128 void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
2129 clang::SourceLocation loc);
2130
2131 /// An object to manage conditionally-evaluated expressions.
2133 CIRGenFunction &cgf;
2134 mlir::OpBuilder::InsertPoint insertPt;
2135
2136 public:
2138 : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {}
2139 ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
2140 : cgf(cgf), insertPt(ip) {}
2141
2143 assert(cgf.outermostConditional != this);
2144 if (!cgf.outermostConditional)
2145 cgf.outermostConditional = this;
2146 }
2147
2149 assert(cgf.outermostConditional != nullptr);
2150 if (cgf.outermostConditional == this)
2151 cgf.outermostConditional = nullptr;
2152 }
2153
2154 /// Returns the insertion point which will be executed prior to each
2155 /// evaluation of the conditional code. In LLVM OG, this method
2156 /// is called getStartingBlock.
2157 mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; }
2158 };
2159
2161 std::optional<LValue> lhs{}, rhs{};
2162 mlir::Value result{};
2163 };
2164
 // Return true if we're currently emitting one branch or the other of a
 // conditional expression, i.e. a ConditionalEvaluation is currently
 // registered as the outermost active conditional.
 2167 bool isInConditionalBranch() const { return outermostConditional != nullptr; }
2168
 // Emit a store of `value` into `addr` at the insertion point that was
 // captured before entering the outermost active conditional, so the store
 // is placed prior to each evaluation of the conditional code rather than
 // inside the branch currently being emitted. Must only be called while
 // inside a conditional branch (asserted below).
 2169 void setBeforeOutermostConditional(mlir::Value value, Address addr) {
 2170 assert(isInConditionalBranch());
 2171 {
 // Temporarily move the builder to the saved pre-conditional insertion
 // point; the InsertionGuard restores the current point when this inner
 // scope ends.
 2172 mlir::OpBuilder::InsertionGuard guard(builder);
 2173 builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
 // Non-volatile store; the alignment of `addr` is passed explicitly as a
 // 64-bit IntegerAttr.
 2174 builder.createStore(
 2175 value.getLoc(), value, addr, /*isVolatile=*/false,
 2176 mlir::IntegerAttr::get(
 2177 mlir::IntegerType::get(value.getContext(), 64),
 2178 (uint64_t)addr.getAlignment().getAsAlign().value()));
 2179 }
 2180 }
2181
2182 // Points to the outermost active conditional control. This is used so that
2183 // we know if a temporary should be destroyed conditionally.
2185
2186 /// An RAII object to record that we're evaluating a statement
2187 /// expression.
2189 CIRGenFunction &cgf;
2190
2191 /// We have to save the outermost conditional: cleanups in a
2192 /// statement expression aren't conditional just because the
2193 /// StmtExpr is.
2194 ConditionalEvaluation *savedOutermostConditional;
2195
2196 public:
2198 : cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
2199 cgf.outermostConditional = nullptr;
2200 }
2201
2203 cgf.outermostConditional = savedOutermostConditional;
2204 }
2205 };
2206
2207 template <typename FuncTy>
2208 ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
2209 const FuncTy &branchGenFunc);
2210
2211 mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
2212 const clang::Stmt *thenS,
2213 const clang::Stmt *elseS);
2214
2215 /// Build a "reference" to a va_list; this is either the address or the value
2216 /// of the expression, depending on how va_list is defined.
2217 Address emitVAListRef(const Expr *e);
2218
2219 /// Emits the start of a CIR variable-argument operation (`cir.va_start`)
2220 ///
2221 /// \param vaList A reference to the \c va_list as emitted by either
2222 /// \c emitVAListRef or \c emitMSVAListRef.
2223 void emitVAStart(mlir::Value vaList);
2224
 2225 /// Emits the end of a CIR variable-argument operation (`cir.va_end`)
2226 ///
2227 /// \param vaList A reference to the \c va_list as emitted by either
2228 /// \c emitVAListRef or \c emitMSVAListRef.
2229 void emitVAEnd(mlir::Value vaList);
2230
2231 /// Generate code to get an argument from the passed in pointer
2232 /// and update it accordingly.
2233 ///
2234 /// \param ve The \c VAArgExpr for which to generate code.
2235 ///
2236 /// \param vaListAddr Receives a reference to the \c va_list as emitted by
2237 /// either \c emitVAListRef or \c emitMSVAListRef.
2238 ///
2239 /// \returns SSA value with the argument.
2240 mlir::Value emitVAArg(VAArgExpr *ve);
2241
2242 /// ----------------------
2243 /// CIR build helpers
2244 /// -----------------
2245public:
2246 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2247 const Twine &name = "tmp",
2248 mlir::Value arraySize = nullptr,
2249 bool insertIntoFnEntryBlock = false);
2250 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2251 const Twine &name = "tmp",
2252 mlir::OpBuilder::InsertPoint ip = {},
2253 mlir::Value arraySize = nullptr);
2254 Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
2255 const Twine &name = "tmp",
2256 mlir::Value arraySize = nullptr,
2257 Address *alloca = nullptr,
2258 mlir::OpBuilder::InsertPoint ip = {});
2259 Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
2260 mlir::Location loc,
2261 const Twine &name = "tmp",
2262 mlir::Value arraySize = nullptr,
2263 mlir::OpBuilder::InsertPoint ip = {});
2264 Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc,
2265 const Twine &name);
2266
2267 /// Create a temporary memory object of the given type, with
 2268 /// appropriate alignment and cast it to the default address space. Returns
2269 /// the original alloca instruction by \p Alloca if it is not nullptr.
2270 Address createMemTemp(QualType t, mlir::Location loc,
2271 const Twine &name = "tmp", Address *alloca = nullptr,
2272 mlir::OpBuilder::InsertPoint ip = {});
2273 Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
2274 const Twine &name = "tmp", Address *alloca = nullptr,
2275 mlir::OpBuilder::InsertPoint ip = {});
2276
 // Cast pointer value `v` to the address space of `destTy` by emitting an
 // address-space cast through the builder.
 // NOTE(review): a value defined directly by a cir::GlobalOp is not yet
 // implemented and is reported via cgm.errorNYI (the bound `globalOp` is
 // otherwise unused); emission still falls through to the cast below.
 2277 mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const {
 2278 if (cir::GlobalOp globalOp = v.getDefiningOp<cir::GlobalOp>())
 2279 cgm.errorNYI("Global op addrspace cast");
 2280 return builder.createAddrSpaceCast(v, destTy);
 2281 }
2282
2283 //===--------------------------------------------------------------------===//
2284 // OpenMP Emission
2285 //===--------------------------------------------------------------------===//
2286public:
2287 mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s);
2288 mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s);
2289 mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s);
2290 mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s);
2291 mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s);
2292 mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s);
2293 mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s);
2294 mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s);
2295 mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s);
2296 mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s);
2297 mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s);
2298 mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s);
2299 mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s);
2300 mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s);
2301 mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s);
2302 mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s);
2303 mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s);
2304 mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s);
2305 mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s);
2306 mlir::LogicalResult
2307 emitOMPParallelForDirective(const OMPParallelForDirective &s);
2308 mlir::LogicalResult
2309 emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s);
2310 mlir::LogicalResult
2311 emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s);
2312 mlir::LogicalResult
2313 emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s);
2314 mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s);
2315 mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s);
2316 mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s);
2317 mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s);
2318 mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s);
2319 mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s);
2320 mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s);
2321 mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s);
2322 mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s);
2323 mlir::LogicalResult
2325 mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s);
2326 mlir::LogicalResult
2328 mlir::LogicalResult
2330 mlir::LogicalResult
2332 mlir::LogicalResult
2334 mlir::LogicalResult
2336 mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s);
2337 mlir::LogicalResult
2339 mlir::LogicalResult
2341 mlir::LogicalResult
2343 mlir::LogicalResult
2345 mlir::LogicalResult
2347 mlir::LogicalResult
2349 mlir::LogicalResult
2350 emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s);
2351 mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(
2355 mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(
2359 mlir::LogicalResult
2361 mlir::LogicalResult emitOMPDistributeParallelForDirective(
2365 mlir::LogicalResult
2369 mlir::LogicalResult emitOMPTargetParallelForSimdDirective(
2371 mlir::LogicalResult
2373 mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(
2375 mlir::LogicalResult
2377 mlir::LogicalResult
2379 mlir::LogicalResult
2385 mlir::LogicalResult
2387 mlir::LogicalResult
2389 mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(
2397 mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s);
2398 mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s);
2399 mlir::LogicalResult
2401 mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s);
2402 mlir::LogicalResult emitOMPSplitDirective(const OMPSplitDirective &s);
2403 mlir::LogicalResult
2405 mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s);
2406 mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s);
2407 mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s);
2408
2412 void emitOMPAllocateDecl(const OMPAllocateDecl &d);
2415 void emitOMPRequiresDecl(const OMPRequiresDecl &d);
2416
2417private:
2418 template <typename Op>
2419 void emitOpenMPClauses(Op &op, ArrayRef<const OMPClause *> clauses);
2420
2421 //===--------------------------------------------------------------------===//
2422 // OpenACC Emission
2423 //===--------------------------------------------------------------------===//
2424private:
2425 template <typename Op>
2426 Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind,
2428 // Function to do the basic implementation of an operation with an Associated
2429 // Statement. Models AssociatedStmtConstruct.
2430 template <typename Op, typename TermOp>
2431 mlir::LogicalResult
2432 emitOpenACCOpAssociatedStmt(mlir::Location start, mlir::Location end,
2433 OpenACCDirectiveKind dirKind,
2435 const Stmt *associatedStmt);
2436
2437 template <typename Op, typename TermOp>
2438 mlir::LogicalResult emitOpenACCOpCombinedConstruct(
2439 mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
2440 llvm::ArrayRef<const OpenACCClause *> clauses, const Stmt *loopStmt);
2441
2442 template <typename Op>
2443 void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind,
2445 // The second template argument doesn't need to be a template, since it should
2446 // always be an mlir::acc::LoopOp, but as this is a template anyway, we make
2447 // it a template argument as this way we can avoid including the OpenACC MLIR
2448 // headers here. We will count on linker failures/explicit instantiation to
2449 // ensure we don't mess this up, but it is only called from 1 place, and
2450 // instantiated 3x.
2451 template <typename ComputeOp, typename LoopOp>
2452 void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp,
2453 OpenACCDirectiveKind dirKind,
2455
2456 // The OpenACC LoopOp requires that we have auto, seq, or independent on all
2457 // LoopOp operations for the 'none' device type case. This function checks if
2458 // the LoopOp has one, else it updates it to have one.
2459 void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan,
2461
2462 // The OpenACC 'cache' construct actually applies to the 'loop' if present. So
2463 // keep track of the 'loop' so that we can add the cache vars to it correctly.
2464 mlir::acc::LoopOp *activeLoopOp = nullptr;
2465
 // RAII type that installs `newOp` as the function's currently-active
 // OpenACC 'loop' operation (cgf.activeLoopOp) and restores the previously
 // active one on destruction. Per the comment above `activeLoopOp`, this is
 // what lets constructs such as 'cache' attach to their enclosing loop.
 2466 struct ActiveOpenACCLoopRAII {
 2467 CIRGenFunction &cgf;
 // Previous value of cgf.activeLoopOp, restored by the destructor.
 2468 mlir::acc::LoopOp *oldLoopOp;
 2469
 2470 ActiveOpenACCLoopRAII(CIRGenFunction &cgf, mlir::acc::LoopOp *newOp)
 2471 : cgf(cgf), oldLoopOp(cgf.activeLoopOp) {
 2472 cgf.activeLoopOp = newOp;
 2473 }
 2474 ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; }
 2475 };
2476
2477 // Keep track of the last place we inserted a 'recipe' so that we can insert
2478 // the next one in lexical order.
2479 mlir::OpBuilder::InsertPoint lastRecipeLocation;
2480
2481public:
2482 // Helper type used to store the list of important information for a 'data'
2483 // clause variable, or a 'cache' variable reference.
2485 mlir::Location beginLoc;
2486 mlir::Value varValue;
2487 std::string name;
2488 // The type of the original variable reference: that is, after 'bounds' have
2489 // removed pointers/array types/etc. So in the case of int arr[5], and a
2490 // private(arr[1]), 'origType' is 'int', but 'baseType' is 'int[5]'.
2494 // The list of types that we found when going through the bounds, which we
2495 // can use to properly set the alloca section.
2497 };
2498
 2499 // Gets the collection of info required to lower an OpenACC clause or cache
2500 // construct variable reference.
2502 // Helper function to emit the integer expressions as required by an OpenACC
2503 // clause/construct.
2504 mlir::Value emitOpenACCIntExpr(const Expr *intExpr);
2505 // Helper function to emit an integer constant as an mlir int type, used for
2506 // constants in OpenACC constructs/clauses.
2507 mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width,
2508 int64_t value);
2509
2510 mlir::LogicalResult
2512 mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
2513 mlir::LogicalResult
2515 mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
2516 mlir::LogicalResult
2518 mlir::LogicalResult
2520 mlir::LogicalResult
2522 mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
2523 mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
2524 mlir::LogicalResult
2526 mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
2527 mlir::LogicalResult
2529 mlir::LogicalResult
2531 mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);
2532
2535
2536 /// Create a temporary memory object for the given aggregate type.
2537 AggValueSlot createAggTemp(QualType ty, mlir::Location loc,
2538 const Twine &name = "tmp",
2539 Address *alloca = nullptr) {
2541 return AggValueSlot::forAddr(
2542 createMemTemp(ty, loc, name, alloca), ty.getQualifiers(),
2545 }
2546
2547private:
2548 QualType getVarArgType(const Expr *arg);
2549
 // RAII scope used while emitting an inlined inheriting constructor: the
 // constructor saves the per-function state of `cgf` (current GlobalDecl,
 // function/code decls, C++ 'this' decl/value/alignment, return-value slot,
 // return type, and the inherited-ctor init-expr arguments, which are
 // moved out), installs fresh state, and the destructor restores it all.
 // NOTE(review): the numbering gaps in this listing (2563, 2568-2570, 2587)
 // indicate source lines dropped during documentation extraction; the code
 // below is reproduced as-is and is incomplete at those points -- verify
 // against the original header.
 2550 class InlinedInheritingConstructorScope {
 2551 public:
 2552 InlinedInheritingConstructorScope(CIRGenFunction &cgf, GlobalDecl gd)
 2553 : cgf(cgf), oldCurGD(cgf.curGD), oldCurFuncDecl(cgf.curFuncDecl),
 2554 oldCurCodeDecl(cgf.curCodeDecl),
 2555 oldCxxabiThisDecl(cgf.cxxabiThisDecl),
 2556 oldCxxThisValue(cgf.cxxThisValue),
 2557 oldCxxabiThisAlignment(cgf.cxxabiThisAlignment),
 2558 oldCxxThisAlignment(cgf.cxxThisAlignment),
 2559 oldReturnValue(cgf.returnValue), oldFnRetTy(cgf.fnRetTy),
 2560 oldCxxInheritedCtorInitExprArgs(
 2561 std::move(cgf.cxxInheritedCtorInitExprArgs)) {
 2562 cgf.curGD = gd;
 2564 cgf.curCodeDecl = cgf.curFuncDecl;
 2565 cgf.cxxabiThisDecl = nullptr;
 2566 cgf.cxxabiThisValue = nullptr;
 2567 cgf.cxxThisValue = nullptr;
 2571 cgf.fnRetTy = QualType();
 2572 cgf.cxxInheritedCtorInitExprArgs.clear();
 2573 // FIXME: at one point when we want to call one of these, we'll need
 2574 // CXXInheritedCtorInitExprArgs here too.
 2575 }
 // Restore every saved field of `cgf` on scope exit.
 2576 ~InlinedInheritingConstructorScope() {
 2577 cgf.curGD = oldCurGD;
 2578 cgf.curFuncDecl = oldCurFuncDecl;
 2579 cgf.curCodeDecl = oldCurCodeDecl;
 2580 cgf.cxxabiThisDecl = oldCxxabiThisDecl;
 2581 cgf.cxxabiThisValue = oldCxxabiThisValue;
 2582 cgf.cxxThisValue = oldCxxThisValue;
 2583 cgf.cxxThisAlignment = oldCxxThisAlignment;
 2584 cgf.cxxabiThisAlignment = oldCxxabiThisAlignment;
 2585 cgf.returnValue = oldReturnValue;
 2586 cgf.fnRetTy = oldFnRetTy;
 2588 std::move(oldCxxInheritedCtorInitExprArgs);
 2589 }
 2590
 2591 private:
 // Saved copies of the CIRGenFunction state, restored by the destructor.
 2592 CIRGenFunction &cgf;
 2593 GlobalDecl oldCurGD;
 2594 const Decl *oldCurFuncDecl;
 2595 const Decl *oldCurCodeDecl;
 2596 ImplicitParamDecl *oldCxxabiThisDecl;
 2597 mlir::Value oldCxxabiThisValue;
 2598 mlir::Value oldCxxThisValue;
 2599 clang::CharUnits oldCxxabiThisAlignment;
 2600 clang::CharUnits oldCxxThisAlignment;
 2601 Address oldReturnValue;
 2602 QualType oldFnRetTy;
 2603 CallArgList oldCxxInheritedCtorInitExprArgs;
 2604 };
2605};
2606
2607} // namespace clang::CIRGen
2608
2609#endif
Defines the clang::ASTContext interface.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, Address ptr, Address val1, Address val2, Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size, cir::MemOrder order, cir::SyncScopeKind scope)
Defines the clang::Expr interface and subclasses for C++ expressions.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
Defines an enumeration for C++ overloaded operators.
Enumerates target-specific builtins in their own namespaces within namespace clang.
C Language Family Type Representation.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
This represents 'pragma omp cancel' directive.
This represents 'pragma omp cancellation point' directive.
This represents 'pragma omp dispatch' directive.
This represents 'pragma omp distribute' directive.
This represents 'pragma omp distribute parallel for' composite directive.
This represents 'pragma omp distribute parallel for simd' composite directive.
This represents 'pragma omp distribute simd' composite directive.
This represents 'pragma omp error' directive.
Represents the 'pragma omp fuse' loop transformation directive.
This represents 'pragma omp loop' directive.
Represents the 'pragma omp interchange' loop transformation directive.
This represents 'pragma omp interop' directive.
This represents 'pragma omp masked' directive.
This represents 'pragma omp masked taskloop' directive.
This represents 'pragma omp masked taskloop simd' directive.
This represents 'pragma omp master taskloop' directive.
This represents 'pragma omp master taskloop simd' directive.
This represents 'pragma omp metadirective' directive.
This represents 'pragma omp parallel loop' directive.
This represents 'pragma omp parallel masked taskloop' directive.
This represents 'pragma omp parallel masked taskloop simd' directive.
This represents 'pragma omp parallel master taskloop' directive.
This represents 'pragma omp parallel master taskloop simd' directive.
Represents the 'pragma omp reverse' loop transformation directive.
This represents 'pragma omp scan' directive.
Represents the 'pragma omp split' loop transformation directive.
This represents the 'pragma omp stripe' loop transformation directive.
This represents 'pragma omp target data' directive.
This represents 'pragma omp target' directive.
This represents 'pragma omp target enter data' directive.
This represents 'pragma omp target exit data' directive.
This represents 'pragma omp target parallel' directive.
This represents 'pragma omp target parallel for' directive.
This represents 'pragma omp target parallel for simd' directive.
This represents 'pragma omp target parallel loop' directive.
This represents 'pragma omp target simd' directive.
This represents 'pragma omp target teams' directive.
This represents 'pragma omp target teams distribute' combined directive.
This represents 'pragma omp target teams distribute parallel for' combined directive.
This represents 'pragma omp target teams distribute parallel for simd' combined directive.
This represents 'pragma omp target teams distribute simd' combined directive.
This represents 'pragma omp target teams loop' directive.
This represents 'pragma omp target update' directive.
This represents 'pragma omp taskloop' directive.
This represents 'pragma omp taskloop simd' directive.
This represents 'pragma omp teams' directive.
This represents 'pragma omp teams distribute' directive.
This represents 'pragma omp teams distribute parallel for' composite directive.
This represents 'pragma omp teams distribute parallel for simd' composite directive.
This represents 'pragma omp teams distribute simd' combined directive.
This represents 'pragma omp teams loop' directive.
This represents the 'pragma omp tile' loop transformation directive.
This represents the 'pragma omp unroll' loop transformation directive.
This class represents a 'loop' construct. The 'loop' construct applies to a 'for' loop (or range-for ...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3772
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3278
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition Expr.h:6927
Represents an attribute applied to a statement.
Definition Stmt.h:2204
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition Expr.h:4456
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition Expr.h:4494
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition Expr.h:4491
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
BreakStmt - This represents a break.
Definition Stmt.h:3136
mlir::Value getPointer() const
Definition Address.h:96
static Address invalid()
Definition Address.h:74
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Value getBasePointer() const
Definition Address.h:101
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
An abstract representation of regular/ObjC call/message targets.
AbstractCallee(const clang::FunctionDecl *fd)
const clang::ParmVarDecl * getParamDecl(unsigned I) const
ArrayInitLoopExprScope(CIRGenFunction &cgf, bool setIdx, mlir::Value index)
CIRGenFPOptionsRAII(CIRGenFunction &cgf, FPOptions FPFeatures)
CXXDefaultInitExprScope(CIRGenFunction &cgf, const CXXDefaultInitExpr *e)
An object to manage conditionally-evaluated expressions.
ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
mlir::OpBuilder::InsertPoint getInsertPoint() const
Returns the insertion point which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forReference(mlir::TypedAttr c)
static ConstantEmission forValue(mlir::TypedAttr c)
LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const
DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr)
FullExprCleanupScope(CIRGenFunction &cgf, const Expr *subExpr)
void exit(ArrayRef< mlir::Value * > valuesToReload={})
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, LValue lvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
RunCleanupsScope(CIRGenFunction &cgf)
Enter a new cleanup scope.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool hasPendingCleanups() const
Whether there are any pending cleanups that have been pushed since this scope was entered.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void restore()
Can be used to restore the state early, before the dtor is run.
SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value)
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
static bool hasScalarEvaluationKind(clang::QualType type)
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
void emitOpenACCRoutine(const OpenACCRoutineDecl &d)
void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d, cir::GlobalOp gv, cir::GetGlobalOp gvAddr)
Add the initializer for 'd' to the global variable that has already been created for it.
mlir::LogicalResult emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &s)
void generateThunk(cir::FuncOp fn, const CIRGenFunctionInfo &fnInfo, GlobalDecl gd, const ThunkInfo &thunk, bool isUnprototyped)
Generate code for a thunk function.
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard SourceLocExprScopeGuard
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d, mlir::OpBuilder::InsertPoint ip={})
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, clang::QualType::DestructionKind dtorKind)
Enter a destroy cleanup for the given local variable.
ImplicitParamDecl * cxxabiThisDecl
CXXThisDecl - When generating code for a C++ member function, this will hold the implicit 'this' decl...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
bool curFuncIsThunk
In C++, whether we are code generating a thunk.
mlir::LogicalResult emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
llvm::SmallVector< PendingCleanupEntry > lifetimeExtendedCleanupStack
mlir::LogicalResult emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &s)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
llvm::ScopedHashTable< const clang::Decl *, mlir::Value > SymTableTy
The symbol table maps a variable name to a value in the current scope.
void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc)
Definition CIRGenCXX.cpp:33
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s)
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator, CallArgList &callArgs)
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
void emitOMPRequiresDecl(const OMPRequiresDecl &d)
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::LogicalResult emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s)
Address cxxDefaultInitExprThis
The value of 'this' to use when evaluating CXXDefaultInitExprs within this expression.
void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &s)
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s)
void setBeforeOutermostConditional(mlir::Value value, Address addr)
mlir::LogicalResult emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s)
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::Value loadCXXThis()
Load the value for 'this'.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
llvm::DenseMap< const clang::Decl *, Address > DeclMapTy
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void addCatchHandlerAttr(const CXXCatchStmt *catchStmt, SmallVector< mlir::Attribute > &handlerAttrs)
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
const clang::Decl * curFuncDecl
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
LValue emitLValueForLambdaField(const FieldDecl *field)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOrTryCall=nullptr)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
bool isTrivialInitializer(const Expr *init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
void emitOpenACCDeclare(const OpenACCDeclareDecl &d)
void emitInlinedInheritingCXXConstructorCall(SourceLocation loc, const CXXConstructorDecl *d, CXXCtorType ctorType, bool forVirtualBase, bool delegating, CallArgList &args)
Address getAddrOfLocalVar(const clang::VarDecl *vd)
Return the address of a local variable.
void emitAnyExprToExn(const Expr *e, Address addr)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
void emitMustTailThunk(GlobalDecl gd, mlir::Value adjustedThisPtr, cir::FuncOp callee)
Emit a musttail call for a thunk with a potentially different ABI.
void pushIrregularPartialArrayCleanup(mlir::Value arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Push an EH cleanup to destroy already-constructed elements of the given array.
Address getAsNaturalAddressOf(Address addr, QualType pointeeTy)
void pushCleanupAndDeferDeactivation(CleanupKind kind, As... a)
Push a cleanup and record it for deferred deactivation.
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s)
mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase, bool delegating)
Return the VTT parameter that should be passed to a base constructor/destructor with virtual bases.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s)
void initializeVTablePointers(mlir::Location loc, const clang::CXXRecordDecl *rd)
mlir::Type convertType(const TypeDecl *t)
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas.
void initializeVTablePointer(mlir::Location loc, const VPtr &vptr)
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d)
void emitAggregateStore(mlir::Value value, Address dest)
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
ConditionalEvaluation * outermostConditional
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &s)
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl, CXXCtorInitializer *baseInit)
RValue emitAtomicExpr(AtomicExpr *e)
void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d, LValue lvalue, bool capturedByInit=false)
Emit an expression as an initializer for an object (variable, field, etc.) at the given location.
void emitCXXGuardedInit(const VarDecl &varDecl, cir::GlobalOp globalOp, bool performInit)
Emit a guarded initializer for a static local variable or a static data member of a class template in...
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s)
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
mlir::LogicalResult emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s)
llvm::SmallVector< DeferredDeactivateCleanup > deferredDeactivationCleanupStack
VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass)
const TargetCIRGenInfo & getTargetHooks() const
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s)
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
RValue emitCoyieldExpr(const CoyieldExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s)
std::optional< mlir::Value > emitRISCVBuiltinExpr(unsigned builtinID, const CallExpr *expr)
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e)
mlir::LogicalResult emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s)
CleanupKind getCleanupKind(QualType::DestructionKind kind)
clang::CharUnits cxxabiThisAlignment
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s)
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
std::pair< mlir::Value, mlir::Type > emitAsmInputLValue(const TargetInfo::ConstraintInfo &info, LValue inputValue, QualType inputType, std::string &constraintString, SourceLocation loc)
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::LogicalResult emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s)
void emitOMPAllocateDecl(const OMPAllocateDecl &d)
mlir::LogicalResult emitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &s)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
CallArgList cxxInheritedCtorInitExprArgs
The values of function arguments to use when evaluating CXXInheritedCtorInitExprs within this context...
mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType)
ImplicitParamDecl * cxxStructorImplicitParamDecl
When generating code for a constructor or destructor, this will hold the implicit argument (e....
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d)
mlir::LogicalResult emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType)
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
void startThunk(cir::FuncOp fn, GlobalDecl gd, const CIRGenFunctionInfo &fnInfo, bool isUnprototyped)
Start generating a thunk function.
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
llvm::SmallVector< PendingCleanupEntry > deferredConditionalCleanupStack
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s)
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s)
mlir::LogicalResult emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s)
mlir::LogicalResult emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
static int64_t getZExtIntValueFromConstOp(mlir::Value val)
Get zero-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s)
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &s)
mlir::LogicalResult emitOMPInterchangeDirective(const OMPInterchangeDirective &s)
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s)
mlir::Type convertTypeForMem(QualType t)
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s, cxxTryBodyEmitter &bodyCallback)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s)
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s)
LValue emitAggExprToLValue(const Expr *e)
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
clang::CurrentSourceLocExprScope curSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
mlir::Value loadCXXVTT()
Load the VTT parameter to base constructors/destructors that have virtual bases.
void emitVarDecl(const clang::VarDecl &d)
This method handles emission of any variable declaration inside a function, including static vars etc...
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value emitSVEPredicateCast(mlir::Value pred, unsigned minNumElts, mlir::Location loc)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr, SmallVectorImpl< mlir::Value > &ops, clang::SVETypeFlags typeFlags)
Address returnValue
The temporary alloca to hold the return value.
LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo)
void emitCallAndReturnForThunk(cir::FuncOp callee, const ThunkInfo *thunk, bool isUnprototyped)
Emit the call and return for a thunk function.
static int64_t getSExtIntValueFromConstOp(mlir::Value val)
Get integer from a mlir::Value that is an int constant or a constant op.
mlir::Value getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
mlir::LogicalResult emitOMPTargetDataDirective(const OMPTargetDataDirective &s)
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &s)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr, const clang::CXXRecordDecl *vtableClass)
Return the Value of the vtable pointer member pointed to by thisAddr.
void emitArrayDestroy(mlir::Value begin, mlir::Value numElements, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Destroys all the elements of the given array, beginning from last to first.
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::LogicalResult emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s)
RValue emitAnyExprToTemp(const clang::Expr *e)
Similarly to emitAnyExpr(), however, the result will always be accessible even if no aggregate locati...
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s)
mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS)
void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
llvm::SmallPtrSet< const clang::CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
Address createCleanupActiveFlag()
Create an active flag variable for use with conditional cleanups.
bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
bool hasVolatileMember(QualType t)
returns true if aggregate type has a volatile member.
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s)
void emitCallArg(CallArgList &args, const clang::Expr *e, clang::QualType argType)
clang::FieldDecl * lambdaThisCaptureField
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s)
void emitConstructorBody(FunctionArgList &args)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &s)
LValue emitCallExprLValue(const clang::CallExpr *e)
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s)
bool haveInsertPoint() const
True if an insertion point is defined.
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::LogicalResult emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s)
void emitAutoVarInit(const AutoVarEmission &emission)
Emit the initializer for an allocated variable.
void emitInitializerForField(clang::FieldDecl *field, LValue lhs, clang::Expr *init)
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
std::optional< mlir::Value > emitAMDGPUBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an AMDGPU builtin function.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::LogicalResult emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void maybeEmitDeferredVarDeclInit(const VarDecl *vd)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_end)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
std::optional< mlir::Value > emitAArch64SMEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
void pushStackRestore(CleanupKind kind, Address spMem)
LValue emitPseudoObjectLValue(const PseudoObjectExpr *E)
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s)
void emitAutoVarDecl(const clang::VarDecl &d)
Emit code and set up symbol table for a variable declaration with auto, register, or no storage class...
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s)
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s)
mlir::LogicalResult emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s)
CIRGenBuilderTy & getBuilder()
void emitVAStart(mlir::Value vaList)
Emits the start of a CIR variable-argument operation (cir.va_start)
bool didCallStackSave
Whether a cir.stacksave operation has been added.
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
mlir::Value emitOpenACCIntExpr(const Expr *intExpr)
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitDestroy(Address addr, QualType type, Destroyer *destroyer)
Immediately perform the destruction of the given object.
void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc, AbstractCallee ac, unsigned paramNum)
Create a check for a function parameter that may potentially be declared as non-null.
void pushPendingCleanupToEHStack(const PendingCleanupEntry &entry)
Promote a single pending cleanup entry onto the EH scope stack.
mlir::LogicalResult emitOMPSplitDirective(const OMPSplitDirective &s)
const CIRGenModule & getCIRGenModule() const
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
llvm::DenseMap< const VarDecl *, mlir::Value > nrvoFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
Destroyer * getDestroyer(clang::QualType::DestructionKind kind)
mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee, llvm::ArrayRef< mlir::Value > args={}, mlir::NamedAttrList attrs={})
void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty)
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
LValue emitInitListLValue(const InitListExpr *e)
mlir::Value arrayInitIndex
The current array initialization index when evaluating an ArrayInitIndexExpr within an ArrayInitLoopE...
void emitAtomicInit(Expr *init, LValue dest)
void popCleanupBlock(bool forDeactivation=false)
Pop a cleanup block from the stack.
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
mlir::LogicalResult emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width, int64_t value)
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
void pushEHDestroyIfNeeded(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushEHDestroyIfNeeded - Push the standard destructor for the given type as an EH-only cleanup.
void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d)
void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::LogicalResult emitOMPParallelForDirective(const OMPParallelForDirective &s)
RValue emitCoawaitExpr(const CoawaitExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
void emitCXXThrowExpr(const CXXThrowExpr *e)
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
mlir::LogicalResult emitOMPDistributeDirective(const OMPDistributeDirective &s)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &s)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &s)
llvm::SmallVector< VPtr, 4 > VPtrsVector
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
bool sawAsmBlock
Whether or not a Microsoft-style asm block has been processed within this function.
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s)
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
std::pair< mlir::Value, mlir::Type > emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr, std::string &constraintString)
EHScopeStack::stable_iterator currentCleanupStackDepth
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
void pushFullExprCleanup(CleanupKind kind, As... a)
Push a cleanup to be run at the end of the current full-expression.
void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param, clang::SourceLocation loc)
We are performing a delegate call; that is, the current function is delegating to another one.
mlir::LogicalResult emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
const CIRGenFunctionInfo * curFnInfo
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
void terminateStructuredRegionBody(mlir::Region &r, mlir::Location loc)
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
void emitInheritedCXXConstructorCall(const CXXConstructorDecl *d, bool forVirtualBase, Address thisAddr, bool inheritedFromVBase, const CXXInheritedCtorInitExpr *e)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
void pushCleanupAfterFullExpr(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer)
Queue a cleanup to be pushed after finishing the current full-expression.
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if LHS is marked _Nonnull...
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
std::optional< mlir::Value > emitAArch64SVEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr)
void emitOMPCapturedExpr(const OMPCapturedExprDecl &d)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &s)
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s)
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates a alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &s)
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s)
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor, const FunctionArgList &args)
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, mlir::OpBuilder::InsertPoint ip, mlir::Value arraySize=nullptr)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s)
void finishThunk()
Finish generating a thunk function.
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
void emitAutoVarCleanups(const AutoVarEmission &emission)
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:49
A saved depth on the scope stack.
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:193
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a call to a CUDA kernel function.
Definition ExprCXX.h:235
CXXCatchStmt - This represents a C++ catch block.
Definition StmtCXX.h:28
Represents a call to a C++ constructor.
Definition ExprCXX.h:1549
Represents a C++ constructor within a class.
Definition DeclCXX.h:2624
Represents a C++ base or member initializer.
Definition DeclCXX.h:2389
A default argument (C++ [dcl.fct.default]).
Definition ExprCXX.h:1271
A use of a default initializer in a constructor or in aggregate initialization.
Definition ExprCXX.h:1378
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2627
Represents a C++ destructor within a class.
Definition DeclCXX.h:2889
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:482
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition ExprCXX.h:1752
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:180
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2356
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:85
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2746
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
Represents a C++ temporary.
Definition ExprCXX.h:1460
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1209
CXXTryStmt - A C++ try block, including all handlers.
Definition StmtCXX.h:69
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
CaseStmt - Represent a case statement.
Definition Stmt.h:1921
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4303
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1741
ContinueStmt - This represents a continue.
Definition Stmt.h:3120
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition StmtCXX.h:473
Represents the body of a coroutine.
Definition StmtCXX.h:320
SourceLocExprScopeGuard(const Expr *DefaultExpr, CurrentSourceLocExprScope &Current)
Represents the current source location and context used to determine the value of the source location...
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1632
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2833
This represents one expression.
Definition Expr.h:112
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6609
Represents a member of a struct/union/class.
Definition Decl.h:3175
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2889
Represents a function declaration or definition.
Definition Decl.h:2015
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
GotoStmt - This represents a direct goto.
Definition Stmt.h:2970
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3009
Describes an C or C++ initializer list.
Definition Expr.h:5302
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3703
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
This represents 'pragma omp allocate ...' directive.
Definition DeclOpenMP.h:536
Pseudo declaration for capturing expressions.
Definition DeclOpenMP.h:445
This represents 'pragma omp declare mapper ...' directive.
Definition DeclOpenMP.h:349
This represents 'pragma omp declare reduction ...' directive.
Definition DeclOpenMP.h:239
This represents 'pragma omp groupprivate ...' directive.
Definition DeclOpenMP.h:173
This represents 'pragma omp requires...' directive.
Definition DeclOpenMP.h:479
This represents 'pragma omp threadprivate ...' directive.
Definition DeclOpenMP.h:110
ObjCMethodDecl - Represents an instance or class method declaration.
Definition DeclObjC.h:140
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
Represents a parameter to a function.
Definition Decl.h:1805
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6803
A (possibly-)qualified type.
Definition TypeBase.h:937
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8471
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
Flags to identify the types for overloaded SVE builtins.
Encodes a location in the source.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Exposes information about the current target.
Definition TargetInfo.h:227
Represents a declaration of a type.
Definition Decl.h:3528
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4016
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
const internal::VariadicDynCastAllOfMatcher< Decl, VarDecl > varDecl
Matches variable declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, CallExpr > callExpr
Matches call expressions.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicAllOfMatcher< Stmt > stmt
Matches statements.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
@ Address
A pointer to a ValueDecl.
Definition Primitives.h:28
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
OpenACCDirectiveKind
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
OpenACCComputeConstruct(OpenACCDirectiveKind K, SourceLocation Start, SourceLocation DirectiveLoc, SourceLocation End, ArrayRef< const OpenACCClause * > Clauses, Stmt *StructuredBlock)
CXXDtorType
C++ destructor types.
Definition ABI.h:34
U cast(CodeGen::Address addr)
Definition Address.h:327
#define true
Definition stdbool.h:25
static bool aggValueSlot()
static bool peepholeProtection()
static bool opAllocaEscapeByReference()
static bool generateDebugInfo()
AutoVarEmission(const clang::VarDecl &variable)
bool isEscapingByRef
True if the variable is a __block variable that is captured by an escaping block.
Address addr
The address of the alloca for languages with explicit address space (e.g.
bool emittedAsOffload
True if the variable was emitted as an offload recipe, and thus doesn't have the same sort of alloca ...
bool isConstantAggregate
True if the variable is of aggregate type and has a constant initializer.
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
Address getObjectAddress(CIRGenFunction &cgf) const
Returns the address of the object within this declaration.
std::unique_ptr< CGCoroData > data
CXXDefaultArgExprScope(CIRGenFunction &cfg, const CXXDefaultArgExpr *e)
Scope that deactivates all enclosed deferred cleanups on exit.
A cleanup that was pushed to the EH stack but whose deactivation is deferred until the enclosing Clea...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
mlir::Block * getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc)
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
void updateRetLoc(mlir::Block *b, mlir::Location loc)
mlir::Location getRetLoc(mlir::Block *b)
A cleanup entry that will be promoted onto the EH scope stack at a later point.
llvm::PointerUnion< const clang::FunctionProtoType *, const clang::ObjCMethodDecl * > p
PrototypeWrapper(const clang::ObjCMethodDecl *md)
PrototypeWrapper(const clang::FunctionProtoType *ft)
const clang::CXXRecordDecl * vtableClass
const clang::CXXRecordDecl * nearestVBase
VlaSizePair(mlir::Value num, QualType ty)
virtual mlir::LogicalResult operator()(CIRGenFunction &cgf)=0
The this pointer adjustment as well as an optional return adjustment for a thunk.
Definition Thunk.h:157