clang 23.0.0git
CIRGenFunction.h
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
14#define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
15
16#include "CIRGenBuilder.h"
17#include "CIRGenCall.h"
18#include "CIRGenModule.h"
19#include "CIRGenTypeCache.h"
20#include "CIRGenValue.h"
21#include "EHScopeStack.h"
22
23#include "Address.h"
24
27#include "clang/AST/CharUnits.h"
29#include "clang/AST/Decl.h"
30#include "clang/AST/ExprCXX.h"
31#include "clang/AST/Stmt.h"
32#include "clang/AST/Type.h"
38#include "llvm/ADT/ScopedHashTable.h"
39#include "llvm/IR/Instructions.h"
40
41namespace {
42class ScalarExprEmitter;
43} // namespace
44
45namespace mlir {
46namespace acc {
47class LoopOp;
48} // namespace acc
49} // namespace mlir
50
51namespace clang::CIRGen {
52
53struct CGCoroData;
54
56public:
58
59private:
60 friend class ::ScalarExprEmitter;
61 /// The builder is a helper class to create IR inside a function. The
62 /// builder is stateful, in particular it keeps an "insertion point": this
63 /// is where the next operations will be introduced.
64 CIRGenBuilderTy &builder;
65
66public:
67 /// The GlobalDecl for the current function being compiled or the global
68 /// variable currently being initialized.
70
72
73 /// The compiler-generated variable that holds the return value.
74 std::optional<mlir::Value> fnRetAlloca;
75
76 // Holds coroutine data if the current function is a coroutine. We use a
77 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
78 // in this header.
79 struct CGCoroInfo {
80 std::unique_ptr<CGCoroData> data;
81 CGCoroInfo();
83 };
85
86 bool isCoroutine() const { return curCoro.data != nullptr; }
87
88 /// The temporary alloca to hold the return value. This is
89 /// invalid iff the function has no return value.
91
92 /// Tracks function scope overall cleanup handling.
94
96
97 /// A mapping from NRVO variables to the flags used to indicate
98 /// when the NRVO has been applied to this variable.
99 llvm::DenseMap<const VarDecl *, mlir::Value> nrvoFlags;
100
101 llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
104
105 /// CXXThisDecl - When generating code for a C++ member function,
106 /// this will hold the implicit 'this' declaration.
108 mlir::Value cxxabiThisValue = nullptr;
109 mlir::Value cxxThisValue = nullptr;
111
112 /// When generating code for a constructor or destructor, this will hold the
113 /// implicit argument (e.g. VTT).
116
117 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within this
118 /// expression.
120
121 // Holds the Decl for the current outermost non-closure context
122 const clang::Decl *curFuncDecl = nullptr;
123 /// This is the inner-most code context, which includes blocks.
124 const clang::Decl *curCodeDecl = nullptr;
126
127 /// The current function or global initializer that is generated code for.
128 /// This is usually a cir::FuncOp, but it can also be a cir::GlobalOp for
129 /// global initializers.
130 mlir::Operation *curFn = nullptr;
131
132 /// Save Parameter Decl for coroutine.
134
135 using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
136 /// This keeps track of the CIR allocas or globals for local C
137 /// declarations.
139
140 /// The type of the condition for the emitting switch statement.
142
143 clang::ASTContext &getContext() const { return cgm.getASTContext(); }
144
145 CIRGenBuilderTy &getBuilder() { return builder; }
146
148 const CIRGenModule &getCIRGenModule() const { return cgm; }
149
151 // We currently assume this isn't called for a global initializer.
152 auto fn = mlir::cast<cir::FuncOp>(curFn);
153 return &fn.getRegion().front();
154 }
155
156 /// Sanitizers enabled for this function.
158
160 public:
164
165 private:
166 void ConstructorHelper(clang::FPOptions FPFeatures);
167 CIRGenFunction &cgf;
168 clang::FPOptions oldFPFeatures;
169 llvm::fp::ExceptionBehavior oldExcept;
170 llvm::RoundingMode oldRounding;
171 };
173
174 /// The symbol table maps a variable name to a value in the current scope.
175 /// Entering a function creates a new scope, and the function arguments are
176 /// added to the mapping. When the processing of a function is terminated,
177 /// the scope is destroyed and the mappings created in this scope are
178 /// dropped.
179 using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
181
182 /// Whether a cir.stacksave operation has been added. Used to avoid
183 /// inserting cir.stacksave for multiple VLAs in the same scope.
184 bool didCallStackSave = false;
185
186 /// Whether or not a Microsoft-style asm block has been processed within
187 /// this function. These can potentially set the return value.
188 bool sawAsmBlock = false;
189
190 /// In C++, whether we are code generating a thunk. This controls whether we
191 /// should emit cleanups.
192 bool curFuncIsThunk = false;
193
194 mlir::Type convertTypeForMem(QualType t);
195
196 mlir::Type convertType(clang::QualType t);
197 mlir::Type convertType(const TypeDecl *t) {
198 return convertType(getContext().getTypeDeclType(t));
199 }
200
201 /// Get integer from a mlir::Value that is an int constant or a constant op.
202 static int64_t getSExtIntValueFromConstOp(mlir::Value val) {
203 auto constOp = val.getDefiningOp<cir::ConstantOp>();
204 assert(constOp && "getSExtIntValueFromConstOp call with non ConstantOp");
205 return constOp.getIntValue().getSExtValue();
206 }
207
208 /// Get zero-extended integer from a mlir::Value that is an int constant or a
209 /// constant op.
210 static int64_t getZExtIntValueFromConstOp(mlir::Value val) {
211 auto constOp = val.getDefiningOp<cir::ConstantOp>();
212 assert(constOp && "getZExtIntValueFromConstOp call with non ConstantOp");
213 return constOp.getIntValue().getZExtValue();
214 }
215
216 /// Return the cir::TypeEvaluationKind of QualType \c type.
218
222
226
228 bool suppressNewContext = false);
230
231 CIRGenTypes &getTypes() const { return cgm.getTypes(); }
232
233 const TargetInfo &getTarget() const { return cgm.getTarget(); }
234 mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }
235
237 return cgm.getTargetCIRGenInfo();
238 }
239
240 // ---------------------
241 // Opaque value handling
242 // ---------------------
243
244 /// Keeps track of the current set of opaque value expressions.
245 llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
246 llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
247
248 // This keeps track of the associated size for each VLA type.
249 // We track this by the size expression rather than the type itself because
250 // in certain situations, like a const qualifier applied to a VLA typedef,
251 // multiple VLA types can share the same size expression.
252 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
253 // enter/leave scopes.
254 llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
255
256public:
257 /// A non-RAII class containing all the information about a bound
258 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
259 /// this which makes individual mappings very simple; using this
260 /// class directly is useful when you have a variable number of
261 /// opaque values or don't want the RAII functionality for some
262 /// reason.
263 class OpaqueValueMappingData {
264 const OpaqueValueExpr *opaqueValue;
265 bool boundLValue;
266
267 OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
268 : opaqueValue(ov), boundLValue(boundLValue) {}
269
270 public:
271 OpaqueValueMappingData() : opaqueValue(nullptr) {}
272
273 static bool shouldBindAsLValue(const Expr *expr) {
274 // gl-values should be bound as l-values for obvious reasons.
275 // Records should be bound as l-values because IR generation
276 // always keeps them in memory. Expressions of function type
277 // act exactly like l-values but are formally required to be
278 // r-values in C.
279 return expr->isGLValue() || expr->getType()->isFunctionType() ||
281 }
282
284 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
285 if (shouldBindAsLValue(ov))
286 return bind(cgf, ov, cgf.emitLValue(e));
287 return bind(cgf, ov, cgf.emitAnyExpr(e));
288 }
289
291 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
292 assert(shouldBindAsLValue(ov));
293 cgf.opaqueLValues.insert(std::make_pair(ov, lv));
294 return OpaqueValueMappingData(ov, true);
295 }
296
298 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
299 assert(!shouldBindAsLValue(ov));
300 cgf.opaqueRValues.insert(std::make_pair(ov, rv));
301
302 OpaqueValueMappingData data(ov, false);
303
304 // Work around an extremely aggressive peephole optimization in
305 // EmitScalarConversion which assumes that all other uses of a
306 // value are extant.
308 return data;
309 }
310
311 bool isValid() const { return opaqueValue != nullptr; }
312 void clear() { opaqueValue = nullptr; }
313
315 assert(opaqueValue && "no data to unbind!");
316
317 if (boundLValue) {
318 cgf.opaqueLValues.erase(opaqueValue);
319 } else {
320 cgf.opaqueRValues.erase(opaqueValue);
322 }
323 }
324 };
325
326 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
328 CIRGenFunction &cgf;
330
331 public:
335
336 /// Build the opaque value mapping for the given conditional
337 /// operator if it's the GNU ?: extension. This is a common
338 /// enough pattern that the convenience operator is really
339 /// helpful.
340 ///
343 : cgf(cgf) {
344 if (mlir::isa<ConditionalOperator>(op))
345 // Leave Data empty.
346 return;
347
349 mlir::cast<BinaryConditionalOperator>(op);
351 e->getCommon());
352 }
353
354 /// Build the opaque value mapping for an OpaqueValueExpr whose source
355 /// expression is set to the expression the OVE represents.
357 : cgf(cgf) {
358 if (ov) {
359 assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
360 "for OVE with no source expression");
361 data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
362 }
363 }
364
366 LValue lvalue)
367 : cgf(cgf),
368 data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}
369
371 RValue rvalue)
372 : cgf(cgf),
373 data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}
374
375 void pop() {
376 data.unbind(cgf);
377 data.clear();
378 }
379
381 if (data.isValid())
382 data.unbind(cgf);
383 }
384 };
385
386private:
387 /// Declare a variable in the current scope, return success if the variable
388 /// wasn't declared yet.
389 void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
390 mlir::Location loc, clang::CharUnits alignment,
391 bool isParam = false);
392
393public:
394 mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);
395
396 void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);
397
398private:
399 // Track current variable initialization (if there's one)
400 const clang::VarDecl *currVarDecl = nullptr;
401 class VarDeclContext {
403 const clang::VarDecl *oldVal = nullptr;
404
405 public:
406 VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) {
407 if (p.currVarDecl)
408 oldVal = p.currVarDecl;
409 p.currVarDecl = value;
410 }
411
412 /// Can be used to restore the state early, before the dtor
413 /// is run.
414 void restore() { p.currVarDecl = oldVal; }
415 ~VarDeclContext() { restore(); }
416 };
417
418public:
419 /// Use to track source locations across nested visitor traversals.
420 /// Always use a `SourceLocRAIIObject` to change currSrcLoc.
421 std::optional<mlir::Location> currSrcLoc;
423 CIRGenFunction &cgf;
424 std::optional<mlir::Location> oldLoc;
425
426 public:
427 SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value) : cgf(cgf) {
428 if (cgf.currSrcLoc)
429 oldLoc = cgf.currSrcLoc;
430 cgf.currSrcLoc = value;
431 }
432
433 /// Can be used to restore the state early, before the dtor
434 /// is run.
435 void restore() { cgf.currSrcLoc = oldLoc; }
437 };
438
440 llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
441
442 /// Hold counters for incrementally naming temporaries
443 unsigned counterRefTmp = 0;
444 unsigned counterAggTmp = 0;
445 std::string getCounterRefTmpAsString();
446 std::string getCounterAggTmpAsString();
447
448 /// Helpers to convert Clang's SourceLocation to a MLIR Location.
449 mlir::Location getLoc(clang::SourceLocation srcLoc);
450 mlir::Location getLoc(clang::SourceRange srcLoc);
451 mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
452
453 const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
454
455 /// True if an insertion point is defined. If not, this indicates that the
456 /// current code being emitted is unreachable.
457 /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
458 /// since we don't yet force null insertion point to designate behavior (like
459 /// LLVM's codegen does) and we probably shouldn't.
460 bool haveInsertPoint() const {
461 return builder.getInsertionBlock() != nullptr;
462 }
463
464 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
465 // an ObjCMethodDecl.
467 llvm::PointerUnion<const clang::FunctionProtoType *,
468 const clang::ObjCMethodDecl *>
470
473 };
474
476
477 /// An abstract representation of regular/ObjC call/message targets.
479 /// The function declaration of the callee.
480 [[maybe_unused]] const clang::Decl *calleeDecl;
481
482 public:
483 AbstractCallee() : calleeDecl(nullptr) {}
484 AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {}
485
486 bool hasFunctionDecl() const {
487 return llvm::isa_and_nonnull<clang::FunctionDecl>(calleeDecl);
488 }
489
490 const clang::Decl *getDecl() const { return calleeDecl; }
491
492 unsigned getNumParams() const {
493 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
494 return fd->getNumParams();
495 return llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_size();
496 }
497
498 const clang::ParmVarDecl *getParamDecl(unsigned I) const {
499 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
500 return fd->getParamDecl(I);
501 return *(llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_begin() +
502 I);
503 }
504 };
505
506 struct VlaSizePair {
507 mlir::Value numElts;
509
510 VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
511 };
512
513 /// Return the number of elements for a single dimension
514 /// for the given array type.
515 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
516
517 /// Returns an MLIR::Value+QualType pair that corresponds to the size,
518 /// in non-variably-sized elements, of a variable length array type,
519 /// plus that largest non-variably-sized element type. Assumes that
520 /// the type has already been emitted with emitVariablyModifiedType.
521 VlaSizePair getVLASize(const VariableArrayType *type);
522 VlaSizePair getVLASize(QualType type);
523
525
526 mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType) {
527 return getAsNaturalAddressOf(addr, pointeeType).getBasePointer();
528 }
529
530 void finishFunction(SourceLocation endLoc);
531
532 /// Determine whether the given initializer is trivial in the sense
533 /// that it requires no code to be generated.
534 bool isTrivialInitializer(const Expr *init);
535
536 /// If the specified expression does not fold to a constant, or if it does but
537 /// contains a label, return false. If it constant folds return true and set
538 /// the boolean result in Result.
539 bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
540 bool allowLabels = false);
542 llvm::APSInt &resultInt,
543 bool allowLabels = false);
544
545 /// Return true if the statement contains a label in it. If
546 /// this statement is not executed normally, it not containing a label means
547 /// that we can just remove the code.
548 bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);
549
550 Address emitExtVectorElementLValue(LValue lv, mlir::Location loc);
551
552 class ConstantEmission {
553 // Cannot use mlir::TypedAttr directly here because of bit availability.
554 llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference;
555 ConstantEmission(mlir::TypedAttr c, bool isReference)
556 : valueAndIsReference(c, isReference) {}
557
558 public:
560 static ConstantEmission forReference(mlir::TypedAttr c) {
561 return ConstantEmission(c, true);
562 }
563 static ConstantEmission forValue(mlir::TypedAttr c) {
564 return ConstantEmission(c, false);
565 }
566
567 explicit operator bool() const {
568 return valueAndIsReference.getOpaqueValue() != nullptr;
569 }
570
571 bool isReference() const { return valueAndIsReference.getInt(); }
573 assert(isReference());
574 cgf.cgm.errorNYI(refExpr->getSourceRange(),
575 "ConstantEmission::getReferenceLValue");
576 return {};
577 }
578
579 mlir::TypedAttr getValue() const {
580 assert(!isReference());
581 return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer());
582 }
583 };
584
585 ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
586 ConstantEmission tryEmitAsConstant(const MemberExpr *me);
587
590 /// The address of the alloca for languages with explicit address space
591 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
592 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
593 /// as a global constant.
595
596 /// True if the variable is of aggregate type and has a constant
597 /// initializer.
599
600 /// True if the variable is a __block variable that is captured by an
601 /// escaping block.
602 bool isEscapingByRef = false;
603
604 /// True if the variable was emitted as an offload recipe, and thus doesn't
605 /// have the same sort of alloca initialization.
606 bool emittedAsOffload = false;
607
608 mlir::Value nrvoFlag{};
609
610 struct Invalid {};
612
615
617
618 bool wasEmittedAsGlobal() const { return !addr.isValid(); }
619
621
622 /// Returns the raw, allocated address, which is not necessarily
623 /// the address of the object itself. It is casted to default
624 /// address space for address space agnostic languages.
625 Address getAllocatedAddress() const { return addr; }
626
627 // Changes the stored address for the emission. This function should only
628 // be used in extreme cases, and isn't required to model normal AST
629 // initialization/variables.
631
632 /// Returns the address of the object within this declaration.
633 /// Note that this does not chase the forwarding pointer for
634 /// __block decls.
636 if (!isEscapingByRef)
637 return addr;
638
640 return Address::invalid();
641 }
642 };
643
644 /// IndirectBranch - The first time an indirect goto is seen we create a block
645 /// reserved for the indirect branch. Unlike before, the actual 'indirectbr'
646 /// is emitted at the end of the function, once all block destinations have
647 /// been resolved.
648 mlir::Block *indirectGotoBlock = nullptr;
649
652
653 /// Perform the usual unary conversions on the specified expression and
654 /// compare the result against zero, returning an Int1Ty value.
655 mlir::Value evaluateExprAsBool(const clang::Expr *e);
656
657 cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d,
658 cir::GlobalOp gv,
659 cir::GetGlobalOp gvAddr);
660
661 /// Enter the cleanups necessary to complete the given phase of destruction
662 /// for a destructor. The end result should call destructors on members and
663 /// base classes in reverse order of their construction.
665
666 /// Determines whether an EH cleanup is required to destroy a type
667 /// with the given destruction kind.
668 /// TODO(cir): could be shared with Clang LLVM codegen
670 switch (kind) {
672 return false;
676 return getLangOpts().Exceptions;
678 return getLangOpts().Exceptions &&
679 cgm.getCodeGenOpts().ObjCAutoRefCountExceptions;
680 }
681 llvm_unreachable("bad destruction kind");
682 }
683
687
689
690 /// Set the address of a local variable.
692 assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
693 localDeclMap.insert({vd, addr});
694
695 // Add to the symbol table if not there already.
696 if (symbolTable.count(vd))
697 return;
698 symbolTable.insert(vd, addr.getPointer());
699 }
700
701 // Replaces the address of the local variable, if it exists. Else does the
702 // same thing as setAddrOfLocalVar.
704 localDeclMap.insert_or_assign(vd, addr);
705 }
706
707 // A class to allow reverting changes to a var-decl's registration to the
708 // localDeclMap. This is used in cases where things are being inserted into
709 // the variable list but don't follow normal lookup/search rules, like in
710 // OpenACC recipe generation.
712 CIRGenFunction &cgf;
713 const VarDecl *vd;
714 bool shouldDelete = false;
715 Address oldAddr = Address::invalid();
716
717 public:
719 : cgf(cgf), vd(vd) {
720 auto mapItr = cgf.localDeclMap.find(vd);
721
722 if (mapItr != cgf.localDeclMap.end())
723 oldAddr = mapItr->second;
724 else
725 shouldDelete = true;
726 }
727
729 if (shouldDelete)
730 cgf.localDeclMap.erase(vd);
731 else
732 cgf.localDeclMap.insert_or_assign(vd, oldAddr);
733 }
734 };
735
737
740
741 static bool
743
750
753
757 const clang::CXXRecordDecl *nearestVBase,
758 clang::CharUnits offsetFromNearestVBase,
759 bool baseIsNonVirtualPrimaryBase,
760 const clang::CXXRecordDecl *vtableClass,
761 VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
762 /// Return the Value of the vtable pointer member pointed to by thisAddr.
763 mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
764 const clang::CXXRecordDecl *vtableClass);
765
766 /// Returns whether we should perform a type checked load when loading a
767 /// virtual function for virtual calls to members of RD. This is generally
768 /// true when both vcall CFI and whole-program-vtables are enabled.
770
771 /// Source location information about the default argument or member
772 /// initializer expression we're evaluating, if any.
776
777 /// A scope within which we are constructing the fields of an object which
778 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
779 /// we need to evaluate the CXXDefaultInitExpr within the evaluation.
781 public:
783 : cgf(cgf), oldCXXDefaultInitExprThis(cgf.cxxDefaultInitExprThis) {
784 cgf.cxxDefaultInitExprThis = thisAddr;
785 }
787 cgf.cxxDefaultInitExprThis = oldCXXDefaultInitExprThis;
788 }
789
790 private:
791 CIRGenFunction &cgf;
792 Address oldCXXDefaultInitExprThis;
793 };
794
795 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
796 /// is overridden to be the object under construction.
798 public:
803 cgf.cxxThisValue = cgf.cxxDefaultInitExprThis.getPointer();
804 cgf.cxxThisAlignment = cgf.cxxDefaultInitExprThis.getAlignment();
805 }
807 cgf.cxxThisValue = oldCXXThisValue;
808 cgf.cxxThisAlignment = oldCXXThisAlignment;
809 }
810
811 public:
813 mlir::Value oldCXXThisValue;
816 };
817
822
824 LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
825
826 /// Construct an address with the natural alignment of T. If a pointer to T
827 /// is expected to be signed, the pointer passed to this function must have
828 /// been signed, and the returned Address will have the pointer authentication
829 /// information needed to authenticate the signed pointer.
831 CharUnits alignment,
832 bool forPointeeType = false,
833 LValueBaseInfo *baseInfo = nullptr) {
834 if (alignment.isZero())
835 alignment = cgm.getNaturalTypeAlignment(t, baseInfo);
836 return Address(ptr, convertTypeForMem(t), alignment);
837 }
838
840 Address value, const CXXRecordDecl *derived,
841 llvm::iterator_range<CastExpr::path_const_iterator> path,
842 bool nullCheckValue, SourceLocation loc);
843
845 mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived,
846 llvm::iterator_range<CastExpr::path_const_iterator> path,
847 bool nullCheckValue);
848
849 /// Return the VTT parameter that should be passed to a base
850 /// constructor/destructor with virtual bases.
851 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
852 /// to ItaniumCXXABI.cpp together with all the references to VTT.
853 mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase,
854 bool delegating);
855
858 return makeAddrLValue(addr, ty, LValueBaseInfo(source));
859 }
860
862 return LValue::makeAddr(addr, ty, baseInfo);
863 }
864
865 void initializeVTablePointers(mlir::Location loc,
866 const clang::CXXRecordDecl *rd);
867 void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
868
870
871 /// Return the address of a local variable.
873 auto it = localDeclMap.find(vd);
874 assert(it != localDeclMap.end() &&
875 "Invalid argument to getAddrOfLocalVar(), no decl!");
876 return it->second;
877 }
878
880 mlir::Type fieldType, unsigned index);
881
882 /// Given an opaque value expression, return its LValue mapping if it exists,
883 /// otherwise create one.
885
886 /// Given an opaque value expression, return its RValue mapping if it exists,
887 /// otherwise create one.
889
890 /// Load the value for 'this'. This function is only valid while generating
891 /// code for an C++ member function.
892 /// FIXME(cir): this should return a mlir::Value!
893 mlir::Value loadCXXThis() {
894 assert(cxxThisValue && "no 'this' value for this function");
895 return cxxThisValue;
896 }
898
899 /// Load the VTT parameter to base constructors/destructors that have virtual
900 /// bases. FIXME: Every place that calls LoadCXXVTT is something that needs to
901 /// be abstracted properly.
902 mlir::Value loadCXXVTT() {
903 assert(cxxStructorImplicitParamValue && "no VTT value for this function");
905 }
906
907 /// Convert the given pointer to a complete class to the given direct base.
909 Address value,
910 const CXXRecordDecl *derived,
911 const CXXRecordDecl *base,
912 bool baseIsVirtual);
913
914 /// Determine whether a return value slot may overlap some other object.
916 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
917 // class subobjects. These cases may need to be revisited depending on the
918 // resolution of the relevant core issue.
920 }
921
922 /// Determine whether a base class initialization may overlap some other
923 /// object.
925 const CXXRecordDecl *baseRD,
926 bool isVirtual);
927
928 /// Get an appropriate 'undef' rvalue for the given type.
929 /// TODO: What's the equivalent for MLIR? Currently we're only using this for
930 /// void types so it just returns RValue::get(nullptr) but it'll need
931 /// addressed later.
933
934 cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
935 cir::FuncType funcType);
936
938 FunctionArgList &args);
939
940 /// Emit the function prologue: declare function arguments in the symbol
941 /// table.
942 void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB,
943 const FunctionDecl *fd, SourceLocation bodyBeginLoc);
944
945 /// Emit code for the start of a function.
946 /// \param loc The location to be associated with the function.
947 /// \param startLoc The location of the function body.
949 cir::FuncOp fn, cir::FuncType funcType,
951 clang::SourceLocation startLoc);
952
953 /// returns true if aggregate type has a volatile member.
955 if (const auto *rd = t->getAsRecordDecl())
956 return rd->hasVolatileMember();
957 return false;
958 }
959
960 void addCatchHandlerAttr(const CXXCatchStmt *catchStmt,
961 SmallVector<mlir::Attribute> &handlerAttrs);
962
963 /// The cleanup depth enclosing all the cleanups associated with the
964 /// parameters.
966
968
969 /// Takes the old cleanup stack size and emits the cleanup blocks
970 /// that have been added.
971 void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth,
972 ArrayRef<mlir::Value *> valuesToReload = {});
973 void popCleanupBlock();
974
975 /// Deactivates the given cleanup block. The block cannot be reactivated. Pops
976 /// it if it's the top of the stack.
977 ///
978 /// \param DominatingIP - An instruction which is known to
979 /// dominate the current IP (if set) and which lies along
980 /// all paths of execution between the current IP and the
981 /// the point at which the cleanup comes into scope.
983 mlir::Operation *dominatingIP);
984
985 /// Push a cleanup to be run at the end of the current full-expression. Safe
986 /// against the possibility that we're currently inside a
987 /// conditionally-evaluated expression.
988 template <class T, class... As>
990 // If we're not in a conditional branch, or if none of the
991 // arguments requires saving, then use the unconditional cleanup.
993 return ehStack.pushCleanup<T>(kind, a...);
994
995 cgm.errorNYI("pushFullExprCleanup in conditional branch");
996 }
997
998 /// Enters a new scope for capturing cleanups, all of which
999 /// will be executed once the scope is exited.
1000 class RunCleanupsScope {
1001 EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
1002
1003 protected:
1006
1007 private:
1008 RunCleanupsScope(const RunCleanupsScope &) = delete;
1009 void operator=(const RunCleanupsScope &) = delete;
1010
1011 protected:
1013
1014 public:
1015 /// Enter a new cleanup scope.
1017 : performCleanup(true), cgf(cgf) {
1018 cleanupStackDepth = cgf.ehStack.stable_begin();
1019 oldDidCallStackSave = cgf.didCallStackSave;
1020 cgf.didCallStackSave = false;
1021 oldCleanupStackDepth = cgf.currentCleanupStackDepth;
1022 cgf.currentCleanupStackDepth = cleanupStackDepth;
1023 }
1024
1025 /// Exit this cleanup scope, emitting any accumulated cleanups.
1027 if (performCleanup)
1028 forceCleanup();
1029 }
1030
1031 /// Force the emission of cleanups now, instead of waiting
1032 /// until this object is destroyed.
1033 void forceCleanup(ArrayRef<mlir::Value *> valuesToReload = {}) {
1034 assert(performCleanup && "Already forced cleanup");
1036 cgf.popCleanupBlocks(cleanupStackDepth, valuesToReload);
1037 performCleanup = false;
1038 cgf.currentCleanupStackDepth = oldCleanupStackDepth;
1039 }
1040
1041 /// Whether there are any pending cleanups that have been pushed since
1042 /// this scope was entered.
1043 bool hasPendingCleanups() const {
1044 return cgf.ehStack.stable_begin() != cleanupStackDepth;
1045 }
1046 };
1047
1048 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1050
1051public:
1052 /// Represents a scope, including function bodies, compound statements, and
1053 /// the substatements of if/while/do/for/switch/try statements. This class
1054 /// handles any automatic cleanup, along with the return value.
1055 struct LexicalScope : public RunCleanupsScope {
1056 private:
1057 // Block containing cleanup code for things initialized in this
1058 // lexical context (scope).
1059 mlir::Block *cleanupBlock = nullptr;
1060
1061 // Points to the scope entry block. This is useful, for instance, for
1062 // helping to insert allocas before finalizing any recursive CodeGen from
1063 // switches.
1064 mlir::Block *entryBlock;
1065
1066 LexicalScope *parentScope = nullptr;
1067
1068 // Holds the actual value for ScopeKind::Try
1069 cir::TryOp tryOp = nullptr;
1070
1071 // On a coroutine body, the OnFallthrough sub stmt holds the handler
1072 // (CoreturnStmt) for control flow falling off the body. Keep track
1073 // of emitted co_return in this scope and allow OnFallthrough to be
1074 // skipped.
1075 bool hasCoreturnStmt = false;
1076
1077 // Only Regular is used at the moment. Support for other kinds will be
1078 // added as the relevant statements/expressions are upstreamed.
1079 enum Kind {
1080 Regular, // cir.if, cir.scope, if_regions
1081 Ternary, // cir.ternary
1082 Switch, // cir.switch
1083 Try, // cir.try
1084 GlobalInit // cir.global initialization code
1085 };
1086 Kind scopeKind = Kind::Regular;
1087
1088 // The scope return value.
1089 mlir::Value retVal = nullptr;
1090
1091 mlir::Location beginLoc;
1092 mlir::Location endLoc;
1093
1094 public:
1095 unsigned depth = 0;
1096
1097 LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
1098 : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
1099 beginLoc(loc), endLoc(loc) {
1100
1101 assert(entryBlock && "LexicalScope requires an entry block");
1102 cgf.curLexScope = this;
1103 if (parentScope)
1104 ++depth;
1105
1106 if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
1107 assert(fusedLoc.getLocations().size() == 2 && "too many locations");
1108 beginLoc = fusedLoc.getLocations()[0];
1109 endLoc = fusedLoc.getLocations()[1];
1110 }
1111 }
1112
// Record the value this scope should yield/return.
1113 void setRetVal(mlir::Value v) { retVal = v; }
1114
// Emits this scope's accumulated cleanup code (defined out of line).
1115 void cleanup();
// Pop this scope: make the parent the function's current lexical scope.
1116 void restore() { cgf.curLexScope = parentScope; }
1117
// NOTE(review): the destructor signature (doxygen lines 1118-1119) is
// missing from this extraction; the body below runs cleanup() then
// restore() on scope exit.
1120 cleanup();
1121 restore();
1122 }
1123
1124 // ---
1125 // Coroutine tracking
1126 // ---
// Whether a co_return has been emitted in this scope (see hasCoreturnStmt).
1127 bool hasCoreturn() const { return hasCoreturnStmt; }
1128 void setCoreturn() { hasCoreturnStmt = true; }
1129
1130 // ---
1131 // Kind
1132 // ---
1133 bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
1134 bool isRegular() { return scopeKind == Kind::Regular; }
1135 bool isSwitch() { return scopeKind == Kind::Switch; }
1136 bool isTernary() { return scopeKind == Kind::Ternary; }
1137 bool isTry() { return scopeKind == Kind::Try; }
// Defined out of line; presumably walks enclosing scopes for the nearest
// Try scope's TryOp — confirm against the implementation.
1138 cir::TryOp getClosestTryParent();
1139 void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
1140 void setAsSwitch() { scopeKind = Kind::Switch; }
1141 void setAsTernary() { scopeKind = Kind::Ternary; }
// Try scopes additionally record the cir::TryOp they correspond to.
1142 void setAsTry(cir::TryOp op) {
1143 scopeKind = Kind::Try;
1144 tryOp = op;
1145 }
1146
1147 // Lazy create cleanup block or return what's available.
1148 mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
1149 if (cleanupBlock)
1150 return cleanupBlock;
1151 cleanupBlock = createCleanupBlock(builder);
1152 return cleanupBlock;
1153 }
1154
// The TryOp recorded by setAsTry(); only meaningful for Try scopes.
1155 cir::TryOp getTry() {
1156 assert(isTry());
1157 return tryOp;
1158 }
1159
// Returns the cleanup block if one exists, null otherwise. The builder
// parameter is unused here — presumably kept for symmetry with
// getOrCreateCleanupBlock; confirm before removing.
1160 mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
1161 return cleanupBlock;
1162 }
1163
1164 mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
1165 // Create the cleanup block but dont hook it up around just yet.
1166 mlir::OpBuilder::InsertionGuard guard(builder);
1167 mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
1168 : &cgf.curFn->getRegion(0);
1169 cleanupBlock = builder.createBlock(r);
1170 return cleanupBlock;
1171 }
1172
1173 // ---
1174 // Return handling.
1175 // ---
1176
1177 private:
1178 // On switches we need one return block per region, since cases don't
1179 // have their own scopes but are distinct regions nonetheless.
1180
1181 // TODO: This implementation should change once we have support for early
1182 // exits in MLIR structured control flow (llvm-project#161575)
// NOTE(review): doxygen line 1183 is missing from this extraction; it
// presumably declared the `retBlocks` container indexed by the maps below
// (used by createRetBlock/getOrCreateRetBlock) — confirm upstream.
// Source location associated with each return block (see updateRetLoc).
1184 llvm::DenseMap<mlir::Block *, mlir::Location> retLocs;
// Index into retBlocks of the return block owned by each cir.case region.
1185 llvm::DenseMap<cir::CaseOp, unsigned> retBlockInCaseIndex;
// Index into retBlocks of the single non-case return block, once created.
1186 std::optional<unsigned> normalRetBlockIndex;
1187
1188 // There's usually only one ret block per scope, but this needs to be
1189 // get or create because of potential unreachable return statements, note
1190 // that for those, all source location maps to the first one found.
1191 mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1192 assert((isa_and_nonnull<cir::CaseOp>(
1193 cgf.builder.getBlock()->getParentOp()) ||
1194 retBlocks.size() == 0) &&
1195 "only switches can hold more than one ret block");
1196
1197 // Create the return block but don't hook it up just yet.
1198 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
1199 auto *b = cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
1200 retBlocks.push_back(b);
1201 updateRetLoc(b, loc);
1202 return b;
1203 }
1204
1205 cir::ReturnOp emitReturn(mlir::Location loc);
1206 void emitImplicitReturn();
1207
1208 public:
// NOTE(review): doxygen line 1209 is missing from this extraction.
// Source location recorded for return block \p b; asserts if none exists.
1210 mlir::Location getRetLoc(mlir::Block *b) { return retLocs.at(b); }
// Record (or overwrite) the source location associated with return block b.
1211 void updateRetLoc(mlir::Block *b, mlir::Location loc) {
1212 retLocs.insert_or_assign(b, loc);
1213 }
1214
1215 mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1216 // Check if we're inside a case region
1217 if (auto caseOp = mlir::dyn_cast_if_present<cir::CaseOp>(
1218 cgf.builder.getBlock()->getParentOp())) {
1219 auto iter = retBlockInCaseIndex.find(caseOp);
1220 if (iter != retBlockInCaseIndex.end()) {
1221 // Reuse existing return block
1222 mlir::Block *ret = retBlocks[iter->second];
1223 updateRetLoc(ret, loc);
1224 return ret;
1225 }
1226 // Create new return block
1227 mlir::Block *ret = createRetBlock(cgf, loc);
1228 retBlockInCaseIndex[caseOp] = retBlocks.size() - 1;
1229 return ret;
1230 }
1231
1232 if (normalRetBlockIndex) {
1233 mlir::Block *ret = retBlocks[*normalRetBlockIndex];
1234 updateRetLoc(ret, loc);
1235 return ret;
1236 }
1237
1238 mlir::Block *ret = createRetBlock(cgf, loc);
1239 normalRetBlockIndex = retBlocks.size() - 1;
1240 return ret;
1241 }
1242
// The scope's entry block, e.g. for inserting allocas ahead of nested codegen.
1243 mlir::Block *getEntryBlock() { return entryBlock; }
1244 };
1245
1247
1248 typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
1249
1251
1252 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
1253 QualType type);
1254
1256 Destroyer *destroyer);
1257
1259
1260 /// Start generating a thunk function.
1261 void startThunk(cir::FuncOp fn, GlobalDecl gd,
1262 const CIRGenFunctionInfo &fnInfo, bool isUnprototyped);
1263
1264 /// Finish generating a thunk function.
1265 void finishThunk();
1266
1267 /// Generate code for a thunk function.
1268 void generateThunk(cir::FuncOp fn, const CIRGenFunctionInfo &fnInfo,
1269 GlobalDecl gd, const ThunkInfo &thunk,
1270 bool isUnprototyped);
1271
1272 /// ----------------------
1273 /// CIR emit functions
1274 /// ----------------------
1275public:
1276 bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr,
1278 clang::SVETypeFlags typeFlags);
1279 mlir::Value emitSVEPredicateCast(mlir::Value pred, unsigned minNumElts,
1280 mlir::Location loc);
1281 std::optional<mlir::Value>
1282 emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
1284 llvm::Triple::ArchType arch);
1285 std::optional<mlir::Value> emitAArch64SMEBuiltinExpr(unsigned builtinID,
1286 const CallExpr *expr);
1287 std::optional<mlir::Value> emitAArch64SVEBuiltinExpr(unsigned builtinID,
1288 const CallExpr *expr);
1289
1290 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
1291 SourceLocation loc,
1292 SourceLocation assumptionLoc,
1293 int64_t alignment,
1294 mlir::Value offsetValue = nullptr);
1295
1296 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
1297 SourceLocation assumptionLoc,
1298 int64_t alignment,
1299 mlir::Value offsetValue = nullptr);
1300
1301private:
1302 void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
1303 clang::CharUnits alignment);
1304
1305 CIRGenCallee emitDirectCallee(const GlobalDecl &gd);
1306
1307public:
1309 llvm::StringRef fieldName,
1310 unsigned fieldIndex);
1311
1312 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1313 mlir::Location loc, clang::CharUnits alignment,
1314 bool insertIntoFnEntryBlock,
1315 mlir::Value arraySize = nullptr);
1316 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1317 mlir::Location loc, clang::CharUnits alignment,
1318 mlir::OpBuilder::InsertPoint ip,
1319 mlir::Value arraySize = nullptr);
1320
1321 void emitAggregateStore(mlir::Value value, Address dest);
1322
1323 void emitAggExpr(const clang::Expr *e, AggValueSlot slot);
1324
1326
1328
1329 /// Emit an aggregate copy.
1330 ///
1331 /// \param isVolatile \c true iff either the source or the destination is
1332 /// volatile.
1333 /// \param MayOverlap Whether the tail padding of the destination might be
1334 /// occupied by some other object. More efficient code can often be
1335 /// generated if not.
1336 void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
1337 AggValueSlot::Overlap_t mayOverlap,
1338 bool isVolatile = false);
1339
1340 /// Emit code to compute the specified expression which can have any type. The
1341 /// result is returned as an RValue struct. If this is an aggregate
1342 /// expression, the aggloc/agglocvolatile arguments indicate where the result
1343 /// should be returned.
1346 bool ignoreResult = false);
1347
1348 /// Emits the code necessary to evaluate an arbitrary expression into the
1349 /// given memory location.
1350 void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals,
1351 bool isInitializer);
1352
1353 /// Similarly to emitAnyExpr(), however, the result will always be accessible
1354 /// even if no aggregate location is provided.
1356
1357 void emitAnyExprToExn(const Expr *e, Address addr);
1358
1359 void emitArrayDestroy(mlir::Value begin, mlir::Value numElements,
1360 QualType elementType, CharUnits elementAlign,
1361 Destroyer *destroyer);
1362
1363 mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
1364 QualType &baseType, Address &addr);
1366
1368
1370 LValueBaseInfo *baseInfo = nullptr);
1371
1372 std::pair<mlir::Value, mlir::Type>
1374 QualType inputType, std::string &constraintString,
1375 SourceLocation loc);
1376 std::pair<mlir::Value, mlir::Type>
1377 emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr,
1378 std::string &constraintString);
1379 mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
1380
1382 void emitAtomicInit(Expr *init, LValue dest);
1383 void emitAtomicStore(RValue rvalue, LValue dest, bool isInit);
1384 void emitAtomicStore(RValue rvalue, LValue dest, cir::MemOrder order,
1385 bool isVolatile, bool isInit);
1387 const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
1388 llvm::function_ref<void(cir::MemOrder)> emitAtomicOp);
1389
1390 mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s);
1391
1392 AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
1393 mlir::OpBuilder::InsertPoint ip = {});
1394
1395 /// Emit code and set up symbol table for a variable declaration with auto,
1396 /// register, or no storage class specifier. These turn into simple stack
1397 /// objects, globals depending on target.
1398 void emitAutoVarDecl(const clang::VarDecl &d);
1399
1400 void emitAutoVarCleanups(const AutoVarEmission &emission);
1401 /// Emit the initializer for an allocated variable. If this call is not
1402 /// associated with the call to emitAutoVarAlloca (as the address of the
1403 /// emission is not directly an alloca), the allocatedSeparately parameter can
1404 /// be used to suppress the assertions. However, this should only be used in
1405 /// extreme cases, as it doesn't properly reflect the language/AST.
1406 void emitAutoVarInit(const AutoVarEmission &emission);
1407 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1409
1410 void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
1411
1412 void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
1413 CXXCtorInitializer *baseInit);
1414
1416
1417 mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
1418
1419 RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
1420 const clang::CallExpr *e, ReturnValueSlot returnValue);
1421
1422 /// Returns a Value corresponding to the size of the given expression by
1423 /// emitting a `cir.objsize` operation.
1424 ///
1425 /// \param e The expression whose object size to compute
1426 /// \param type Determines the semantics of the object size computation.
1427 /// The type parameter is a 2-bit value where:
1428 /// bit 0 (type & 1): 0 = whole object, 1 = closest subobject
1429 /// bit 1 (type & 2): 0 = maximum size, 2 = minimum size
1430 /// \param resType The result type for the size value
1431 /// \param emittedE Optional pre-emitted pointer value. If non-null, we'll
1432 /// call `cir.objsize` on this value rather than emitting e.
1433 /// \param isDynamic If true, allows runtime evaluation via dynamic mode
1434 mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type,
1435 cir::IntType resType, mlir::Value emittedE,
1436 bool isDynamic);
1437
1438 mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e,
1439 unsigned type,
1440 cir::IntType resType,
1441 mlir::Value emittedE,
1442 bool isDynamic);
1443
1444 int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts);
1445
1447
1448 RValue emitCall(const CIRGenFunctionInfo &funcInfo,
1449 const CIRGenCallee &callee, ReturnValueSlot returnValue,
1450 const CallArgList &args, cir::CIRCallOpInterface *callOp,
1451 mlir::Location loc);
1454 const CallArgList &args,
1455 cir::CIRCallOpInterface *callOrTryCall = nullptr) {
1456 assert(currSrcLoc && "source location must have been set");
1457 return emitCall(funcInfo, callee, returnValue, args, callOrTryCall,
1458 *currSrcLoc);
1459 }
1460
1461 RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
1463
1464 /// Emit the call and return for a thunk function.
1465 void emitCallAndReturnForThunk(cir::FuncOp callee, const ThunkInfo *thunk,
1466 bool isUnprototyped);
1467
1468 void emitCallArg(CallArgList &args, const clang::Expr *e,
1469 clang::QualType argType);
1470 void emitCallArgs(
1471 CallArgList &args, PrototypeWrapper prototype,
1472 llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
1473 AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
1478
1479 template <typename T>
1480 mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
1481 mlir::ArrayAttr value,
1482 cir::CaseOpKind kind,
1483 bool buildingTopLevelCase);
1484
1486
1487 mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
1488 mlir::Type condType,
1489 bool buildingTopLevelCase);
1490
1491 LValue emitCastLValue(const CastExpr *e);
1492
1493 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
1494 /// sanitizer is enabled, a runtime check is also emitted.
1495 mlir::Value emitCheckedArgForAssume(const Expr *e);
1496
1497 /// Emit a conversion from the specified complex type to the specified
1498 /// destination type, where the destination type is an LLVM scalar type.
1499 mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
1500 QualType dstTy, SourceLocation loc);
1501
1504
1506
1507 mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
1508 cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1509 cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1510 cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
1511 cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
1512 mlir::Value coroframeAddr);
1514
1515 void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
1516
1518
1519 mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
1520
1521 mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s);
1522
1524 AggValueSlot dest);
1525
1528 Address arrayBegin, const CXXConstructExpr *e,
1529 bool newPointerIsChecked,
1530 bool zeroInitialize = false);
1532 mlir::Value numElements, Address arrayBase,
1533 const CXXConstructExpr *e,
1534 bool newPointerIsChecked,
1535 bool zeroInitialize);
1537 clang::CXXCtorType type, bool forVirtualBase,
1538 bool delegating, AggValueSlot thisAVS,
1539 const clang::CXXConstructExpr *e);
1540
1542 clang::CXXCtorType type, bool forVirtualBase,
1543 bool delegating, Address thisAddr,
1545
1546 void emitCXXDeleteExpr(const CXXDeleteExpr *e);
1547
1549 bool forVirtualBase, bool delegating,
1550 Address thisAddr, QualType thisTy);
1551
1553 mlir::Value thisVal, QualType thisTy,
1554 mlir::Value implicitParam,
1555 QualType implicitParamTy, const CallExpr *e);
1556
1557 mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
1559
1562
1564 const Expr *e, Address base, mlir::Value memberPtr,
1565 const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo);
1566
1568 const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
1569 ReturnValueSlot returnValue, mlir::Value thisPtr,
1570 mlir::Value implicitParam, clang::QualType implicitParamTy,
1571 const clang::CallExpr *ce, CallArgList *rtlArgs);
1572
1574 const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
1575 ReturnValueSlot returnValue, bool hasQualifier,
1576 clang::NestedNameSpecifier qualifier, bool isArrow,
1577 const clang::Expr *base);
1578
1581
1582 mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
1583
1584 void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
1585 mlir::Type elementTy, Address beginPtr,
1586 mlir::Value numElements,
1587 mlir::Value allocSizeWithoutCookie);
1588
1589 /// Create a check for a function parameter that may potentially be
1590 /// declared as non-null.
1591 void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc,
1592 AbstractCallee ac, unsigned paramNum);
1593
1595 const CXXMethodDecl *md,
1597
1600
1602
1604 const CallExpr *callExpr,
1606
1607 void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType,
1608 Address ptr);
1609
1610 void emitCXXThrowExpr(const CXXThrowExpr *e);
1611
1612 mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
1613
1615 clang::CXXCtorType ctorType, FunctionArgList &args);
1616
1617 // It's important not to confuse this and emitDelegateCXXConstructorCall.
1618 // Delegating constructors are the C++11 feature. The constructor delegate
1619 // optimization is used to reduce duplication in the base and complete
1620 // constructors where they are substantially the same.
1622 const FunctionArgList &args);
1623
1624 void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr,
1625 QualType deleteTy);
1626
1627 mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
1628
1629 mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e);
1630 mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
1631
1632 /// Emit an expression as an initializer for an object (variable, field, etc.)
1633 /// at the given location. The expression is not necessarily the normal
1634 /// initializer for the object, and the address is not necessarily
1635 /// its normal location.
1636 ///
1637 /// \param init the initializing expression
1638 /// \param d the object to act as if we're initializing
1639 /// \param lvalue the lvalue to initialize
1640 /// \param capturedByInit true if \p d is a __block variable whose address is
1641 /// potentially changed by the initializer
1642 void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
1643 LValue lvalue, bool capturedByInit = false);
1644
1645 mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
1646
1647 mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
1648
1649 mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s);
1650
1652
1654 clang::Expr *init);
1655
1657
1658 mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);
1659
1660 mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
1661
1662 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
1663
1664 void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty);
1665
1666 mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee,
1668 mlir::NamedAttrList attrs = {});
1669
1670 void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc);
1671
1672 /// Emit the computation of the specified expression of scalar type.
1673 mlir::Value emitScalarExpr(const clang::Expr *e,
1674 bool ignoreResultAssign = false);
1675
1676 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv);
1677
1678 /// Build a debug stoppoint if we are emitting debug info.
1679 void emitStopPoint(const Stmt *s);
1680
1681 // Build CIR for a statement. useCurrentScope should be true if no
1682 // new scopes need be created when finding a compound statement.
1683 mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope,
1684 llvm::ArrayRef<const Attr *> attrs = {});
1685
1686 mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
1687 bool useCurrentScope);
1688
1689 mlir::LogicalResult emitForStmt(const clang::ForStmt &s);
1690
1691 void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator,
1692 CallArgList &callArgs);
1693
1694 RValue emitCoawaitExpr(const CoawaitExpr &e,
1695 AggValueSlot aggSlot = AggValueSlot::ignored(),
1696 bool ignoreResult = false);
1697
1698 RValue emitCoyieldExpr(const CoyieldExpr &e,
1699 AggValueSlot aggSlot = AggValueSlot::ignored(),
1700 bool ignoreResult = false);
1701 /// Emit the computation of the specified expression of complex type,
1702 /// returning the result.
1703 mlir::Value emitComplexExpr(const Expr *e);
1704
1705 void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit);
1706
1707 mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv);
1708
1709 LValue emitComplexAssignmentLValue(const BinaryOperator *e);
1710 LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
1711 LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e,
1712 mlir::Value &result);
1713
1714 mlir::LogicalResult
1715 emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
1716 AggValueSlot slot = AggValueSlot::ignored());
1717
1718 mlir::LogicalResult
1720 Address *lastValue = nullptr,
1721 AggValueSlot slot = AggValueSlot::ignored());
1722
1723 void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
1724 mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
1725 LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
1726
1727 mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
1728 mlir::Type condType,
1729 bool buildingTopLevelCase);
1730
1732 clang::CXXCtorType ctorType,
1733 const FunctionArgList &args,
1735
1736 /// We are performing a delegate call; that is, the current function is
1737 /// delegating to another one. Produce a r-value suitable for passing the
1738 /// given parameter.
1739 void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
1741
1742 /// Emit an `if` on a boolean condition to the specified blocks.
1743 /// FIXME: Based on the condition, this might try to simplify the codegen of
1744 /// the conditional based on the branch.
1745 /// In the future, we may apply code generation simplifications here,
1746 /// similar to those used in classic LLVM codegen
1747 /// See `EmitBranchOnBoolExpr` for inspiration.
1748 mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
1749 const clang::Stmt *thenS,
1750 const clang::Stmt *elseS);
1751 cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
1752 BuilderCallbackRef thenBuilder,
1753 mlir::Location thenLoc,
1754 BuilderCallbackRef elseBuilder,
1755 std::optional<mlir::Location> elseLoc = {});
1756
1757 mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
1758
1759 LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e);
1760
1761 mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
1762 mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
1763
1764 void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md);
1765 void emitLambdaStaticInvokeBody(const CXXMethodDecl *md);
1766
1767 mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
1768
1769 /// Emit code to compute the specified expression,
1770 /// ignoring the result.
1771 void emitIgnoredExpr(const clang::Expr *e);
1772
1773 RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc);
1774
1775 /// Load a complex number from the specified l-value.
1776 mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc);
1777
1778 RValue emitLoadOfExtVectorElementLValue(LValue lv);
1779
1780 /// Given an expression that represents a value lvalue, this method emits
1781 /// the address of the lvalue, then loads the result as an rvalue,
1782 /// returning the rvalue.
1783 RValue emitLoadOfLValue(LValue lv, SourceLocation loc);
1784
1785 Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
1786 LValueBaseInfo *pointeeBaseInfo);
1787 LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
1788 QualType refTy, AlignmentSource source);
1789
1790 /// EmitLoadOfScalar - Load a scalar value from an address, taking
1791 /// care to appropriately convert from the memory representation to
1792 /// the LLVM value representation. The l-value must be a simple
1793 /// l-value.
1794 mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);
1795 mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, QualType ty,
1796 SourceLocation loc, LValueBaseInfo baseInfo);
1797
1798 /// Emit code to compute a designator that specifies the location
1799 /// of the expression.
1800 /// FIXME: document this function better.
1801 LValue emitLValue(const clang::Expr *e);
1802 LValue emitLValueForBitField(LValue base, const FieldDecl *field);
1803 LValue emitLValueForField(LValue base, const clang::FieldDecl *field);
1804
1805 LValue emitLValueForLambdaField(const FieldDecl *field);
1806 LValue emitLValueForLambdaField(const FieldDecl *field,
1807 mlir::Value thisValue);
1808
1809 /// Like emitLValueForField, except that if the Field is a reference, this
1810 /// will return the address of the reference and not the address of the value
1811 /// stored in the reference.
1812 LValue emitLValueForFieldInitialization(LValue base,
1813 const clang::FieldDecl *field,
1814 llvm::StringRef fieldName);
1815
1816 LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
1817
1818 LValue emitMemberExpr(const MemberExpr *e);
1819
1820 /// Emit a musttail call for a thunk with a potentially different ABI.
1821 void emitMustTailThunk(GlobalDecl gd, mlir::Value adjustedThisPtr,
1822 cir::FuncOp callee);
1823
1824 /// Emit a call to an AMDGPU builtin function.
1825 std::optional<mlir::Value> emitAMDGPUBuiltinExpr(unsigned builtinID,
1826 const CallExpr *expr);
1827
1828 LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
1829
1830 LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
1831
1832 /// Given an expression with a pointer type, emit the value and compute our
1833 /// best estimate of the alignment of the pointee.
1834 ///
1835 /// One reasonable way to use this information is when there's a language
1836 /// guarantee that the pointer must be aligned to some stricter value, and
1837 /// we're simply trying to ensure that sufficiently obvious uses of under-
1838 /// aligned objects don't get miscompiled; for example, a placement new
1839 /// into the address of a local variable. In such a case, it's quite
1840 /// reasonable to just ignore the returned alignment when it isn't from an
1841 /// explicit source.
1842 Address emitPointerWithAlignment(const clang::Expr *expr,
1843 LValueBaseInfo *baseInfo = nullptr);
1844
1845 /// Emits a reference binding to the passed in expression.
1846 RValue emitReferenceBindingToExpr(const Expr *e);
1847
1848 mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);
1849
1850 RValue emitRotate(const CallExpr *e, bool isRotateLeft);
1851
1852 mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);
1853
1854 /// Emit a conversion from the specified type to the specified destination
1855 /// type, both of which are CIR scalar types.
1856 mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
1857 clang::QualType dstType,
1858 clang::SourceLocation loc);
1859
1860 void emitScalarInit(const clang::Expr *init, mlir::Location loc,
1861 LValue lvalue, bool capturedByInit = false);
1862
1863 mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx,
1864 const Expr *argExpr);
1865
1866 void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage);
1867
1868 /// Emit a guarded initializer for a static local variable or a static
1869 /// data member of a class template instantiation.
1870 void emitCXXGuardedInit(const VarDecl &varDecl, cir::GlobalOp globalOp,
1871 bool performInit);
1872
1873 void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest,
1874 bool isInit);
1875
1876 void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
1877 clang::QualType ty, LValueBaseInfo baseInfo,
1878 bool isInit = false, bool isNontemporal = false);
1879 void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
1880
1881 /// Store the specified rvalue into the specified
1882 /// lvalue, where both are guaranteed to the have the same type, and that type
1883 /// is 'Ty'.
1884 void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);
1885
1886 mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);
1887
1888 LValue emitStringLiteralLValue(const StringLiteral *e,
1889 llvm::StringRef name = ".str");
1890
1891 mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
1892 mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
1893 bool buildingTopLevelCase);
1894 mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
1895
1896 std::optional<mlir::Value>
1897 emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e,
1898 ReturnValueSlot &returnValue);
1899
1900 /// Given a value and its clang type, returns the value casted to its memory
1901 /// representation.
1902 /// Note: CIR defers most of the special casting to the final lowering passes
1903 /// to conserve the high level information.
1904 mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);
1905
1906 /// EmitFromMemory - Change a scalar value from its memory
1907 /// representation to its value representation.
1908 mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty);
1909
1910 /// Emit a trap instruction, which is used to abort the program in an abnormal
1911 /// way, usually for debugging purposes.
1912 /// \p createNewBlock indicates whether to create a new block for the IR
1913 /// builder. Since the `cir.trap` operation is a terminator, operations that
1914 /// follow a trap cannot be emitted after `cir.trap` in the same block. To
1915 /// ensure these operations get emitted successfully, you need to create a new
1916 /// dummy block and set the insertion point there before continuing from the
1917 /// trap operation.
1918 void emitTrap(mlir::Location loc, bool createNewBlock);
1919
1920 LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
1921
1922 mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
1923
1924 /// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
1925 /// checking is enabled. Otherwise, just emit an unreachable instruction.
1926 /// \p createNewBlock indicates whether to create a new block for the IR
1927 /// builder. Since the `cir.unreachable` operation is a terminator, operations
1928 /// that follow an unreachable point cannot be emitted after `cir.unreachable`
1929 /// in the same block. To ensure these operations get emitted successfully,
1930 /// you need to create a dummy block and set the insertion point there before
1931 /// continuing from the unreachable point.
1932 void emitUnreachable(clang::SourceLocation loc, bool createNewBlock);
1933
1934 /// This method handles emission of any variable declaration
1935 /// inside a function, including static vars etc.
1936 void emitVarDecl(const clang::VarDecl &d);
1937
1938 void emitVariablyModifiedType(QualType ty);
1939
1940 mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
1941
1942 std::optional<mlir::Value> emitRISCVBuiltinExpr(unsigned builtinID,
1943 const CallExpr *expr);
1944
1945 std::optional<mlir::Value> emitX86BuiltinExpr(unsigned builtinID,
1946 const CallExpr *expr);
1947
1948 /// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
1949 /// nonnull, if 1\p LHS is marked _Nonnull.
1950 void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
1951 clang::SourceLocation loc);
1952
1953 /// An object to manage conditionally-evaluated expressions.
1955 CIRGenFunction &cgf;
1956 mlir::OpBuilder::InsertPoint insertPt;
1957
1958 public:
1960 : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {}
1961 ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
1962 : cgf(cgf), insertPt(ip) {}
1963
1965 assert(cgf.outermostConditional != this);
1966 if (!cgf.outermostConditional)
1967 cgf.outermostConditional = this;
1968 }
1969
1971 assert(cgf.outermostConditional != nullptr);
1972 if (cgf.outermostConditional == this)
1973 cgf.outermostConditional = nullptr;
1974 }
1975
1976 /// Returns the insertion point which will be executed prior to each
1977 /// evaluation of the conditional code. In LLVM OG, this method
1978 /// is called getStartingBlock.
1979 mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; }
1980 };
1981
1983 std::optional<LValue> lhs{}, rhs{};
1984 mlir::Value result{};
1985 };
1986
1987 // Return true if we're currently emitting one branch or the other of a
1988 // conditional expression.
1989 bool isInConditionalBranch() const { return outermostConditional != nullptr; }
1990
1991 void setBeforeOutermostConditional(mlir::Value value, Address addr) {
1992 assert(isInConditionalBranch());
1993 {
1994 mlir::OpBuilder::InsertionGuard guard(builder);
1995 builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
1996 builder.createStore(
1997 value.getLoc(), value, addr, /*isVolatile=*/false,
1998 mlir::IntegerAttr::get(
1999 mlir::IntegerType::get(value.getContext(), 64),
2000 (uint64_t)addr.getAlignment().getAsAlign().value()));
2001 }
2002 }
2003
2004 // Points to the outermost active conditional control. This is used so that
2005 // we know if a temporary should be destroyed conditionally.
2007
2008 /// An RAII object to record that we're evaluating a statement
2009 /// expression.
2011 CIRGenFunction &cgf;
2012
2013 /// We have to save the outermost conditional: cleanups in a
2014 /// statement expression aren't conditional just because the
2015 /// StmtExpr is.
2016 ConditionalEvaluation *savedOutermostConditional;
2017
2018 public:
2020 : cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
2021 cgf.outermostConditional = nullptr;
2022 }
2023
2025 cgf.outermostConditional = savedOutermostConditional;
2026 }
2027 };
2028
2029 template <typename FuncTy>
2030 ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
2031 const FuncTy &branchGenFunc);
2032
2033 mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
2034 const clang::Stmt *thenS,
2035 const clang::Stmt *elseS);
2036
2037 /// Build a "reference" to a va_list; this is either the address or the value
2038 /// of the expression, depending on how va_list is defined.
2039 Address emitVAListRef(const Expr *e);
2040
2041 /// Emits the start of a CIR variable-argument operation (`cir.va_start`)
2042 ///
2043 /// \param vaList A reference to the \c va_list as emitted by either
2044 /// \c emitVAListRef or \c emitMSVAListRef.
2045 void emitVAStart(mlir::Value vaList);
2046
2047 /// Emits the end of a CIR variable-argument operation (`cir.va_start`)
2048 ///
2049 /// \param vaList A reference to the \c va_list as emitted by either
2050 /// \c emitVAListRef or \c emitMSVAListRef.
2051 void emitVAEnd(mlir::Value vaList);
2052
2053 /// Generate code to get an argument from the passed in pointer
2054 /// and update it accordingly.
2055 ///
2056 /// \param ve The \c VAArgExpr for which to generate code.
2057 ///
2058 /// \param vaListAddr Receives a reference to the \c va_list as emitted by
2059 /// either \c emitVAListRef or \c emitMSVAListRef.
2060 ///
2061 /// \returns SSA value with the argument.
2062 mlir::Value emitVAArg(VAArgExpr *ve);
2063
2064 /// ----------------------
2065 /// CIR build helpers
2066 /// -----------------
2067public:
2068 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2069 const Twine &name = "tmp",
2070 mlir::Value arraySize = nullptr,
2071 bool insertIntoFnEntryBlock = false);
2072 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2073 const Twine &name = "tmp",
2074 mlir::OpBuilder::InsertPoint ip = {},
2075 mlir::Value arraySize = nullptr);
2076 Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
2077 const Twine &name = "tmp",
2078 mlir::Value arraySize = nullptr,
2079 Address *alloca = nullptr,
2080 mlir::OpBuilder::InsertPoint ip = {});
2081 Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
2082 mlir::Location loc,
2083 const Twine &name = "tmp",
2084 mlir::Value arraySize = nullptr,
2085 mlir::OpBuilder::InsertPoint ip = {});
2086 Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc,
2087 const Twine &name);
2088
2089 /// Create a temporary memory object of the given type, with
2090 /// appropriate alignmen and cast it to the default address space. Returns
2091 /// the original alloca instruction by \p Alloca if it is not nullptr.
2092 Address createMemTemp(QualType t, mlir::Location loc,
2093 const Twine &name = "tmp", Address *alloca = nullptr,
2094 mlir::OpBuilder::InsertPoint ip = {});
2095 Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
2096 const Twine &name = "tmp", Address *alloca = nullptr,
2097 mlir::OpBuilder::InsertPoint ip = {});
2098
2099 mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const {
2100 if (cir::GlobalOp globalOp = v.getDefiningOp<cir::GlobalOp>())
2101 cgm.errorNYI("Global op addrspace cast");
2102 return builder.createAddrSpaceCast(v, destTy);
2103 }
2104
2105 //===--------------------------------------------------------------------===//
2106 // OpenMP Emission
2107 //===--------------------------------------------------------------------===//
2108public:
2109 mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s);
2110 mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s);
2111 mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s);
2112 mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s);
2113 mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s);
2114 mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s);
2115 mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s);
2116 mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s);
2117 mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s);
2118 mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s);
2119 mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s);
2120 mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s);
2121 mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s);
2122 mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s);
2123 mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s);
2124 mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s);
2125 mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s);
2126 mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s);
2127 mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s);
2128 mlir::LogicalResult
2129 emitOMPParallelForDirective(const OMPParallelForDirective &s);
2130 mlir::LogicalResult
2131 emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s);
2132 mlir::LogicalResult
2133 emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s);
2134 mlir::LogicalResult
2135 emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s);
2136 mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s);
2137 mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s);
2138 mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s);
2139 mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s);
2140 mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s);
2141 mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s);
2142 mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s);
2143 mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s);
2144 mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s);
2145 mlir::LogicalResult
2147 mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s);
2148 mlir::LogicalResult
2150 mlir::LogicalResult
2152 mlir::LogicalResult
2154 mlir::LogicalResult
2156 mlir::LogicalResult
2158 mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s);
2159 mlir::LogicalResult
2161 mlir::LogicalResult
2163 mlir::LogicalResult
2165 mlir::LogicalResult
2167 mlir::LogicalResult
2169 mlir::LogicalResult
2171 mlir::LogicalResult
2172 emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s);
2173 mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(
2177 mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(
2181 mlir::LogicalResult
2183 mlir::LogicalResult emitOMPDistributeParallelForDirective(
2187 mlir::LogicalResult
2191 mlir::LogicalResult emitOMPTargetParallelForSimdDirective(
2193 mlir::LogicalResult
2195 mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(
2197 mlir::LogicalResult
2199 mlir::LogicalResult
2201 mlir::LogicalResult
2207 mlir::LogicalResult
2209 mlir::LogicalResult
2211 mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(
2219 mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s);
2220 mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s);
2221 mlir::LogicalResult
2223 mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s);
2224 mlir::LogicalResult
2226 mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s);
2227 mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s);
2228 mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s);
2229
2233 void emitOMPAllocateDecl(const OMPAllocateDecl &d);
2236 void emitOMPRequiresDecl(const OMPRequiresDecl &d);
2237
2238private:
2239 template <typename Op>
2240 void emitOpenMPClauses(Op &op, ArrayRef<const OMPClause *> clauses);
2241
2242 //===--------------------------------------------------------------------===//
2243 // OpenACC Emission
2244 //===--------------------------------------------------------------------===//
2245private:
2246 template <typename Op>
2247 Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind,
2249 // Function to do the basic implementation of an operation with an Associated
2250 // Statement. Models AssociatedStmtConstruct.
2251 template <typename Op, typename TermOp>
2252 mlir::LogicalResult
2253 emitOpenACCOpAssociatedStmt(mlir::Location start, mlir::Location end,
2254 OpenACCDirectiveKind dirKind,
2256 const Stmt *associatedStmt);
2257
2258 template <typename Op, typename TermOp>
2259 mlir::LogicalResult emitOpenACCOpCombinedConstruct(
2260 mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
2261 llvm::ArrayRef<const OpenACCClause *> clauses, const Stmt *loopStmt);
2262
2263 template <typename Op>
2264 void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind,
2266 // The second template argument doesn't need to be a template, since it should
2267 // always be an mlir::acc::LoopOp, but as this is a template anyway, we make
2268 // it a template argument as this way we can avoid including the OpenACC MLIR
2269 // headers here. We will count on linker failures/explicit instantiation to
2270 // ensure we don't mess this up, but it is only called from 1 place, and
2271 // instantiated 3x.
2272 template <typename ComputeOp, typename LoopOp>
2273 void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp,
2274 OpenACCDirectiveKind dirKind,
2276
2277 // The OpenACC LoopOp requires that we have auto, seq, or independent on all
2278 // LoopOp operations for the 'none' device type case. This function checks if
2279 // the LoopOp has one, else it updates it to have one.
2280 void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan,
2282
2283 // The OpenACC 'cache' construct actually applies to the 'loop' if present. So
2284 // keep track of the 'loop' so that we can add the cache vars to it correctly.
2285 mlir::acc::LoopOp *activeLoopOp = nullptr;
2286
2287 struct ActiveOpenACCLoopRAII {
2288 CIRGenFunction &cgf;
2289 mlir::acc::LoopOp *oldLoopOp;
2290
2291 ActiveOpenACCLoopRAII(CIRGenFunction &cgf, mlir::acc::LoopOp *newOp)
2292 : cgf(cgf), oldLoopOp(cgf.activeLoopOp) {
2293 cgf.activeLoopOp = newOp;
2294 }
2295 ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; }
2296 };
2297
2298 // Keep track of the last place we inserted a 'recipe' so that we can insert
2299 // the next one in lexical order.
2300 mlir::OpBuilder::InsertPoint lastRecipeLocation;
2301
2302public:
2303 // Helper type used to store the list of important information for a 'data'
2304 // clause variable, or a 'cache' variable reference.
2306 mlir::Location beginLoc;
2307 mlir::Value varValue;
2308 std::string name;
2309 // The type of the original variable reference: that is, after 'bounds' have
2310 // removed pointers/array types/etc. So in the case of int arr[5], and a
2311 // private(arr[1]), 'origType' is 'int', but 'baseType' is 'int[5]'.
2315 // The list of types that we found when going through the bounds, which we
2316 // can use to properly set the alloca section.
2318 };
2319
2320 // Gets the collection of info required to lower and OpenACC clause or cache
2321 // construct variable reference.
2323 // Helper function to emit the integer expressions as required by an OpenACC
2324 // clause/construct.
2325 mlir::Value emitOpenACCIntExpr(const Expr *intExpr);
2326 // Helper function to emit an integer constant as an mlir int type, used for
2327 // constants in OpenACC constructs/clauses.
2328 mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width,
2329 int64_t value);
2330
2331 mlir::LogicalResult
2333 mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
2334 mlir::LogicalResult
2336 mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
2337 mlir::LogicalResult
2339 mlir::LogicalResult
2341 mlir::LogicalResult
2343 mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
2344 mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
2345 mlir::LogicalResult
2347 mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
2348 mlir::LogicalResult
2350 mlir::LogicalResult
2352 mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);
2353
2356
2357 /// Create a temporary memory object for the given aggregate type.
2358 AggValueSlot createAggTemp(QualType ty, mlir::Location loc,
2359 const Twine &name = "tmp",
2360 Address *alloca = nullptr) {
2362 return AggValueSlot::forAddr(
2363 createMemTemp(ty, loc, name, alloca), ty.getQualifiers(),
2366 }
2367
2368private:
2369 QualType getVarArgType(const Expr *arg);
2370};
2371
2372} // namespace clang::CIRGen
2373
2374#endif
Defines the clang::ASTContext interface.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, Address ptr, Address val1, Address val2, Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size, cir::MemOrder order, cir::SyncScopeKind scope)
Defines the clang::Expr interface and subclasses for C++ expressions.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
Defines an enumeration for C++ overloaded operators.
Enumerates target-specific builtins in their own namespaces within namespace clang.
C Language Family Type Representation.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
This represents 'pragma omp cancel' directive.
This represents 'pragma omp cancellation point' directive.
This represents 'pragma omp dispatch' directive.
This represents 'pragma omp distribute' directive.
This represents 'pragma omp distribute parallel for' composite directive.
This represents 'pragma omp distribute parallel for simd' composite directive.
This represents 'pragma omp distribute simd' composite directive.
This represents 'pragma omp error' directive.
Represents the 'pragma omp fuse' loop transformation directive.
This represents 'pragma omp loop' directive.
Represents the 'pragma omp interchange' loop transformation directive.
This represents 'pragma omp interop' directive.
This represents 'pragma omp masked' directive.
This represents 'pragma omp masked taskloop' directive.
This represents 'pragma omp masked taskloop simd' directive.
This represents 'pragma omp master taskloop' directive.
This represents 'pragma omp master taskloop simd' directive.
This represents 'pragma omp metadirective' directive.
This represents 'pragma omp parallel loop' directive.
This represents 'pragma omp parallel masked taskloop' directive.
This represents 'pragma omp parallel masked taskloop simd' directive.
This represents 'pragma omp parallel master taskloop' directive.
This represents 'pragma omp parallel master taskloop simd' directive.
Represents the 'pragma omp reverse' loop transformation directive.
This represents 'pragma omp scan' directive.
This represents the 'pragma omp stripe' loop transformation directive.
This represents 'pragma omp target data' directive.
This represents 'pragma omp target' directive.
This represents 'pragma omp target enter data' directive.
This represents 'pragma omp target exit data' directive.
This represents 'pragma omp target parallel' directive.
This represents 'pragma omp target parallel for' directive.
This represents 'pragma omp target parallel for simd' directive.
This represents 'pragma omp target parallel loop' directive.
This represents 'pragma omp target simd' directive.
This represents 'pragma omp target teams' directive.
This represents 'pragma omp target teams distribute' combined directive.
This represents 'pragma omp target teams distribute parallel for' combined directive.
This represents 'pragma omp target teams distribute parallel for simd' combined directive.
This represents 'pragma omp target teams distribute simd' combined directive.
This represents 'pragma omp target teams loop' directive.
This represents 'pragma omp target update' directive.
This represents 'pragma omp taskloop' directive.
This represents 'pragma omp taskloop simd' directive.
This represents 'pragma omp teams' directive.
This represents 'pragma omp teams distribute' directive.
This represents 'pragma omp teams distribute parallel for' composite directive.
This represents 'pragma omp teams distribute parallel for simd' composite directive.
This represents 'pragma omp teams distribute simd' combined directive.
This represents 'pragma omp teams loop' directive.
This represents the 'pragma omp tile' loop transformation directive.
This represents the 'pragma omp unroll' loop transformation directive.
This class represents a 'loop' construct. The 'loop' construct applies to a 'for' loop (or range-for ...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3772
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3278
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition Expr.h:6927
Represents an attribute applied to a statement.
Definition Stmt.h:2204
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition Expr.h:4456
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition Expr.h:4494
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition Expr.h:4491
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
BreakStmt - This represents a break.
Definition Stmt.h:3136
mlir::Value getPointer() const
Definition Address.h:96
static Address invalid()
Definition Address.h:74
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Value getBasePointer() const
Definition Address.h:101
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
An abstract representation of regular/ObjC call/message targets.
AbstractCallee(const clang::FunctionDecl *fd)
const clang::ParmVarDecl * getParamDecl(unsigned I) const
CIRGenFPOptionsRAII(CIRGenFunction &cgf, FPOptions FPFeatures)
CXXDefaultInitExprScope(CIRGenFunction &cgf, const CXXDefaultInitExpr *e)
An object to manage conditionally-evaluated expressions.
ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
mlir::OpBuilder::InsertPoint getInsertPoint() const
Returns the insertion point which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forReference(mlir::TypedAttr c)
static ConstantEmission forValue(mlir::TypedAttr c)
LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const
DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr)
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, LValue lvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
RunCleanupsScope(CIRGenFunction &cgf)
Enter a new cleanup scope.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool hasPendingCleanups() const
Whether there are any pending cleanups that have been pushed since this scope was entered.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void restore()
Can be used to restore the state early, before the dtor is run.
SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value)
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
static bool hasScalarEvaluationKind(clang::QualType type)
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
void emitOpenACCRoutine(const OpenACCRoutineDecl &d)
void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d, cir::GlobalOp gv, cir::GetGlobalOp gvAddr)
Add the initializer for 'd' to the global variable that has already been created for it.
mlir::LogicalResult emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &s)
void generateThunk(cir::FuncOp fn, const CIRGenFunctionInfo &fnInfo, GlobalDecl gd, const ThunkInfo &thunk, bool isUnprototyped)
Generate code for a thunk function.
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard SourceLocExprScopeGuard
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d, mlir::OpBuilder::InsertPoint ip={})
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, clang::QualType::DestructionKind dtorKind)
Enter a destroy cleanup for the given local variable.
ImplicitParamDecl * cxxabiThisDecl
CXXThisDecl - When generating code for a C++ member function, this will hold the implicit 'this' decl...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
bool curFuncIsThunk
In C++, whether we are code generating a thunk.
mlir::LogicalResult emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
mlir::LogicalResult emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &s)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
llvm::ScopedHashTable< const clang::Decl *, mlir::Value > SymTableTy
The symbol table maps a variable name to a value in the current scope.
void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc)
Definition CIRGenCXX.cpp:33
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s)
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator, CallArgList &callArgs)
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
void emitOMPRequiresDecl(const OMPRequiresDecl &d)
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::LogicalResult emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s)
Address cxxDefaultInitExprThis
The value of 'this' to sue when evaluating CXXDefaultInitExprs within this expression.
void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &s)
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s)
void setBeforeOutermostConditional(mlir::Value value, Address addr)
mlir::LogicalResult emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s)
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::Value loadCXXThis()
Load the value for 'this'.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-vlaue withi the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
llvm::DenseMap< const clang::Decl *, Address > DeclMapTy
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void addCatchHandlerAttr(const CXXCatchStmt *catchStmt, SmallVector< mlir::Attribute > &handlerAttrs)
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
const clang::Decl * curFuncDecl
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
LValue emitLValueForLambdaField(const FieldDecl *field)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOrTryCall=nullptr)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
bool isTrivialInitializer(const Expr *init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
void emitOpenACCDeclare(const OpenACCDeclareDecl &d)
Address getAddrOfLocalVar(const clang::VarDecl *vd)
Return the address of a local variable.
void emitAnyExprToExn(const Expr *e, Address addr)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
void emitMustTailThunk(GlobalDecl gd, mlir::Value adjustedThisPtr, cir::FuncOp callee)
Emit a musttail call for a thunk with a potentially different ABI.
Address getAsNaturalAddressOf(Address addr, QualType pointeeTy)
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s)
mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase, bool delegating)
Return the VTT parameter that should be passed to a base constructor/destructor with virtual bases.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s)
void initializeVTablePointers(mlir::Location loc, const clang::CXXRecordDecl *rd)
mlir::Type convertType(const TypeDecl *t)
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas.
void initializeVTablePointer(mlir::Location loc, const VPtr &vptr)
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d)
void emitAggregateStore(mlir::Value value, Address dest)
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
ConditionalEvaluation * outermostConditional
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &s)
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl, CXXCtorInitializer *baseInit)
RValue emitAtomicExpr(AtomicExpr *e)
void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d, LValue lvalue, bool capturedByInit=false)
Emit an expression as an initializer for an object (variable, field, etc.) at the given location.
void emitCXXGuardedInit(const VarDecl &varDecl, cir::GlobalOp globalOp, bool performInit)
Emit a guarded initializer for a static local variable or a static data member of a class template in...
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s)
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
mlir::LogicalResult emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s)
VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass)
const TargetCIRGenInfo & getTargetHooks() const
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s)
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
RValue emitCoyieldExpr(const CoyieldExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s)
std::optional< mlir::Value > emitRISCVBuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e)
mlir::LogicalResult emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s)
CleanupKind getCleanupKind(QualType::DestructionKind kind)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s)
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
std::pair< mlir::Value, mlir::Type > emitAsmInputLValue(const TargetInfo::ConstraintInfo &info, LValue inputValue, QualType inputType, std::string &constraintString, SourceLocation loc)
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::LogicalResult emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s)
void emitOMPAllocateDecl(const OMPAllocateDecl &d)
mlir::LogicalResult emitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &s)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType)
ImplicitParamDecl * cxxStructorImplicitParamDecl
When generating code for a constructor or destructor, this will hold the implicit argument (e....
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d)
mlir::LogicalResult emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType)
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
void startThunk(cir::FuncOp fn, GlobalDecl gd, const CIRGenFunctionInfo &fnInfo, bool isUnprototyped)
Start generating a thunk function.
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s)
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s)
mlir::LogicalResult emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s)
mlir::LogicalResult emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
static int64_t getZExtIntValueFromConstOp(mlir::Value val)
Get zero-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s)
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &s)
mlir::LogicalResult emitOMPInterchangeDirective(const OMPInterchangeDirective &s)
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s)
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s)
LValue emitAggExprToLValue(const Expr *e)
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
clang::CurrentSourceLocExprScope curSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
mlir::Value loadCXXVTT()
Load the VTT parameter passed to base constructors/destructors that have virtual bases.
void emitVarDecl(const clang::VarDecl &d)
This method handles emission of any variable declaration inside a function, including static vars etc...
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value emitSVEPredicateCast(mlir::Value pred, unsigned minNumElts, mlir::Location loc)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr, SmallVectorImpl< mlir::Value > &ops, clang::SVETypeFlags typeFlags)
Address returnValue
The temporary alloca to hold the return value.
LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo)
void emitCallAndReturnForThunk(cir::FuncOp callee, const ThunkInfo *thunk, bool isUnprototyped)
Emit the call and return for a thunk function.
static int64_t getSExtIntValueFromConstOp(mlir::Value val)
Get sign-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPTargetDataDirective(const OMPTargetDataDirective &s)
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &s)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr, const clang::CXXRecordDecl *vtableClass)
Return the Value of the vtable pointer member pointed to by thisAddr.
void emitArrayDestroy(mlir::Value begin, mlir::Value numElements, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Destroys all the elements of the given array, beginning from last to first.
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::LogicalResult emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s)
RValue emitAnyExprToTemp(const clang::Expr *e)
Similarly to emitAnyExpr(), however, the result will always be accessible even if no aggregate locati...
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s)
mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS)
void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
llvm::SmallPtrSet< const clang::CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
bool hasVolatileMember(QualType t)
returns true if aggregate type has a volatile member.
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s)
void emitCallArg(CallArgList &args, const clang::Expr *e, clang::QualType argType)
clang::FieldDecl * lambdaThisCaptureField
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s)
void emitConstructorBody(FunctionArgList &args)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &s)
LValue emitCallExprLValue(const clang::CallExpr *e)
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s)
bool haveInsertPoint() const
True if an insertion point is defined.
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::LogicalResult emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s)
void emitAutoVarInit(const AutoVarEmission &emission)
Emit the initializer for an allocated variable.
void emitInitializerForField(clang::FieldDecl *field, LValue lhs, clang::Expr *init)
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
std::optional< mlir::Value > emitAMDGPUBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an AMDGPU builtin function.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::LogicalResult emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void maybeEmitDeferredVarDeclInit(const VarDecl *vd)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_end)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
std::optional< mlir::Value > emitAArch64SMEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
void pushStackRestore(CleanupKind kind, Address spMem)
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s)
void emitAutoVarDecl(const clang::VarDecl &d)
Emit code and set up symbol table for a variable declaration with auto, register, or no storage class...
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s)
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s)
mlir::LogicalResult emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s)
CIRGenBuilderTy & getBuilder()
void emitVAStart(mlir::Value vaList)
Emits the start of a CIR variable-argument operation (cir.va_start)
bool didCallStackSave
Whether a cir.stacksave operation has been added.
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
mlir::Value emitOpenACCIntExpr(const Expr *intExpr)
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitDestroy(Address addr, QualType type, Destroyer *destroyer)
Immediately perform the destruction of the given object.
void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc, AbstractCallee ac, unsigned paramNum)
Create a check for a function parameter that may potentially be declared as non-null.
const CIRGenModule & getCIRGenModule() const
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
llvm::DenseMap< const VarDecl *, mlir::Value > nrvoFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
Destroyer * getDestroyer(clang::QualType::DestructionKind kind)
mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee, llvm::ArrayRef< mlir::Value > args={}, mlir::NamedAttrList attrs={})
void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty)
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
void emitAtomicInit(Expr *init, LValue dest)
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
mlir::LogicalResult emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv)
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width, int64_t value)
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d)
void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::LogicalResult emitOMPParallelForDirective(const OMPParallelForDirective &s)
RValue emitCoawaitExpr(const CoawaitExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
void emitCXXThrowExpr(const CXXThrowExpr *e)
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
mlir::LogicalResult emitOMPDistributeDirective(const OMPDistributeDirective &s)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &s)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &s)
llvm::SmallVector< VPtr, 4 > VPtrsVector
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
bool sawAsmBlock
Whether or not a Microsoft-style asm block has been processed within this function.
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s)
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
std::pair< mlir::Value, mlir::Type > emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr, std::string &constraintString)
EHScopeStack::stable_iterator currentCleanupStackDepth
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
void pushFullExprCleanup(CleanupKind kind, As... a)
Push a cleanup to be run at the end of the current full-expression.
void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param, clang::SourceLocation loc)
We are performing a delegate call; that is, the current function is delegating to another one.
mlir::LogicalResult emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
const CIRGenFunctionInfo * curFnInfo
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if the LHS is marked _Nonnull...
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
std::optional< mlir::Value > emitAArch64SVEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr)
void emitOMPCapturedExpr(const OMPCapturedExprDecl &d)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &s)
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /volatile:ms...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s)
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates an alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &s)
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s)
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor, const FunctionArgList &args)
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, mlir::OpBuilder::InsertPoint ip, mlir::Value arraySize=nullptr)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s)
void finishThunk()
Finish generating a thunk function.
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
void emitAutoVarCleanups(const AutoVarEmission &emission)
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:48
A saved depth on the scope stack.
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:193
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a call to a CUDA kernel function.
Definition ExprCXX.h:235
CXXCatchStmt - This represents a C++ catch block.
Definition StmtCXX.h:28
Represents a call to a C++ constructor.
Definition ExprCXX.h:1549
Represents a C++ constructor within a class.
Definition DeclCXX.h:2611
Represents a C++ base or member initializer.
Definition DeclCXX.h:2376
A default argument (C++ [dcl.fct.default]).
Definition ExprCXX.h:1271
A use of a default initializer in a constructor or in aggregate initialization.
Definition ExprCXX.h:1378
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2627
Represents a C++ destructor within a class.
Definition DeclCXX.h:2876
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:482
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:180
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2356
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:85
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2746
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
Represents a C++ temporary.
Definition ExprCXX.h:1460
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1209
CXXTryStmt - A C++ try block, including all handlers.
Definition StmtCXX.h:69
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
CaseStmt - Represent a case statement.
Definition Stmt.h:1921
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4303
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1741
ContinueStmt - This represents a continue.
Definition Stmt.h:3120
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition StmtCXX.h:473
Represents the body of a coroutine.
Definition StmtCXX.h:320
SourceLocExprScopeGuard(const Expr *DefaultExpr, CurrentSourceLocExprScope &Current)
Represents the current source location and context used to determine the value of the source location...
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1632
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2833
This represents one expression.
Definition Expr.h:112
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6609
Represents a member of a struct/union/class.
Definition Decl.h:3175
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2889
Represents a function declaration or definition.
Definition Decl.h:2015
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2970
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3009
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3703
Represents a C++ nested name specifier, such as "::std::vector<int>::".
This represents '#pragma omp allocate ...' directive.
Definition DeclOpenMP.h:536
Pseudo declaration for capturing expressions.
Definition DeclOpenMP.h:445
This represents '#pragma omp declare mapper ...' directive.
Definition DeclOpenMP.h:349
This represents '#pragma omp declare reduction ...' directive.
Definition DeclOpenMP.h:239
This represents '#pragma omp groupprivate ...' directive.
Definition DeclOpenMP.h:173
This represents '#pragma omp requires...' directive.
Definition DeclOpenMP.h:479
This represents '#pragma omp threadprivate ...' directive.
Definition DeclOpenMP.h:110
ObjCMethodDecl - Represents an instance or class method declaration.
Definition DeclObjC.h:140
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
Represents a parameter to a function.
Definition Decl.h:1805
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition Expr.h:2008
A (possibly-)qualified type.
Definition TypeBase.h:937
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8471
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
Flags to identify the types for overloaded SVE builtins.
Encodes a location in the source.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Exposes information about the current target.
Definition TargetInfo.h:227
Represents a declaration of a type.
Definition Decl.h:3528
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4016
#define bool
Definition gpuintrin.h:32
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
const internal::VariadicDynCastAllOfMatcher< Decl, VarDecl > varDecl
Matches variable declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, CallExpr > callExpr
Matches call expressions.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicAllOfMatcher< Stmt > stmt
Matches statements.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
OpenACCDirectiveKind
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
OpenACCComputeConstruct(OpenACCDirectiveKind K, SourceLocation Start, SourceLocation DirectiveLoc, SourceLocation End, ArrayRef< const OpenACCClause * > Clauses, Stmt *StructuredBlock)
CXXDtorType
C++ destructor types.
Definition ABI.h:34
#define true
Definition stdbool.h:25
static bool aggValueSlot()
static bool peepholeProtection()
static bool opAllocaEscapeByReference()
static bool generateDebugInfo()
AutoVarEmission(const clang::VarDecl &variable)
bool isEscapingByRef
True if the variable is a __block variable that is captured by an escaping block.
Address addr
The address of the alloca for languages with explicit address space (e.g.
bool emittedAsOffload
True if the variable was emitted as an offload recipe, and thus doesn't have the same sort of alloca ...
bool isConstantAggregate
True if the variable is of aggregate type and has a constant initializer.
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
Address getObjectAddress(CIRGenFunction &cgf) const
Returns the address of the object within this declaration.
std::unique_ptr< CGCoroData > data
CXXDefaultArgExprScope(CIRGenFunction &cfg, const CXXDefaultArgExpr *e)
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
mlir::Block * createCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc)
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
void updateRetLoc(mlir::Block *b, mlir::Location loc)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateCleanupBlock(mlir::OpBuilder &builder)
mlir::Location getRetLoc(mlir::Block *b)
llvm::PointerUnion< const clang::FunctionProtoType *, const clang::ObjCMethodDecl * > p
PrototypeWrapper(const clang::ObjCMethodDecl *md)
PrototypeWrapper(const clang::FunctionProtoType *ft)
const clang::CXXRecordDecl * vtableClass
const clang::CXXRecordDecl * nearestVBase
VlaSizePair(mlir::Value num, QualType ty)
The this pointer adjustment as well as an optional return adjustment for a thunk.
Definition Thunk.h:157