clang 23.0.0git
CIRGenFunction.h
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
14#define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
15
16#include "CIRGenBuilder.h"
17#include "CIRGenCall.h"
18#include "CIRGenModule.h"
19#include "CIRGenTypeCache.h"
20#include "CIRGenValue.h"
21#include "EHScopeStack.h"
22
23#include "Address.h"
24
27#include "clang/AST/CharUnits.h"
29#include "clang/AST/Decl.h"
30#include "clang/AST/ExprCXX.h"
31#include "clang/AST/Stmt.h"
32#include "clang/AST/Type.h"
38#include "llvm/ADT/ScopedHashTable.h"
39#include "llvm/IR/Instructions.h"
40
41namespace {
42class ScalarExprEmitter;
43} // namespace
44
45namespace mlir {
46namespace acc {
47class LoopOp;
48} // namespace acc
49} // namespace mlir
50
51namespace clang::CIRGen {
52
53struct CGCoroData;
54
56public:
58
59private:
60 friend class ::ScalarExprEmitter;
61 /// The builder is a helper class to create IR inside a function. The
62 /// builder is stateful, in particular it keeps an "insertion point": this
63 /// is where the next operations will be introduced.
64 CIRGenBuilderTy &builder;
65
66public:
67 /// The GlobalDecl for the current function being compiled or the global
68 /// variable currently being initialized.
70
72
73 /// The compiler-generated variable that holds the return value.
74 std::optional<mlir::Value> fnRetAlloca;
75
76 // Holds coroutine data if the current function is a coroutine. We use a
77 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
78 // in this header.
79 struct CGCoroInfo {
80 std::unique_ptr<CGCoroData> data;
81 CGCoroInfo();
83 };
85
86 bool isCoroutine() const { return curCoro.data != nullptr; }
87
88 /// The temporary alloca to hold the return value. This is
89 /// invalid iff the function has no return value.
91
92 /// Tracks function scope overall cleanup handling.
94
96
97 /// A mapping from NRVO variables to the flags used to indicate
98 /// when the NRVO has been applied to this variable.
99 llvm::DenseMap<const VarDecl *, mlir::Value> nrvoFlags;
100
101 llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
104
105 /// CXXThisDecl - When generating code for a C++ member function,
106 /// this will hold the implicit 'this' declaration.
108 mlir::Value cxxabiThisValue = nullptr;
109 mlir::Value cxxThisValue = nullptr;
111
112 /// When generating code for a constructor or destructor, this will hold the
113 /// implicit argument (e.g. VTT).
116
117 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within this
118 /// expression.
120
121 // Holds the Decl for the current outermost non-closure context
122 const clang::Decl *curFuncDecl = nullptr;
123 /// This is the inner-most code context, which includes blocks.
124 const clang::Decl *curCodeDecl = nullptr;
126
127 /// The current function or global initializer that is generated code for.
128 /// This is usually a cir::FuncOp, but it can also be a cir::GlobalOp for
129 /// global initializers.
130 mlir::Operation *curFn = nullptr;
131
132 /// Save Parameter Decl for coroutine.
134
135 using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
136 /// This keeps track of the CIR allocas or globals for local C
137 /// declarations.
139
140 /// The type of the condition for the emitting switch statement.
142
143 clang::ASTContext &getContext() const { return cgm.getASTContext(); }
144
145 CIRGenBuilderTy &getBuilder() { return builder; }
146
148 const CIRGenModule &getCIRGenModule() const { return cgm; }
149
151 // We currently assume this isn't called for a global initializer.
152 auto fn = mlir::cast<cir::FuncOp>(curFn);
153 return &fn.getRegion().front();
154 }
155
156 /// Sanitizers enabled for this function.
158
160 public:
164
165 private:
166 void ConstructorHelper(clang::FPOptions FPFeatures);
167 CIRGenFunction &cgf;
168 clang::FPOptions oldFPFeatures;
169 llvm::fp::ExceptionBehavior oldExcept;
170 llvm::RoundingMode oldRounding;
171 };
173
174 /// The symbol table maps a variable name to a value in the current scope.
175 /// Entering a function creates a new scope, and the function arguments are
176 /// added to the mapping. When the processing of a function is terminated,
177 /// the scope is destroyed and the mappings created in this scope are
178 /// dropped.
179 using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
181
182 /// Whether a cir.stacksave operation has been added. Used to avoid
183 /// inserting cir.stacksave for multiple VLAs in the same scope.
184 bool didCallStackSave = false;
185
186 /// Whether or not a Microsoft-style asm block has been processed within
187 /// this function. These can potentially set the return value.
188 bool sawAsmBlock = false;
189
190 /// In C++, whether we are code generating a thunk. This controls whether we
191 /// should emit cleanups.
192 bool curFuncIsThunk = false;
193
194 mlir::Type convertTypeForMem(QualType t);
195
196 mlir::Type convertType(clang::QualType t);
197 mlir::Type convertType(const TypeDecl *t) {
198 return convertType(getContext().getTypeDeclType(t));
199 }
200
201 /// Get integer from a mlir::Value that is an int constant or a constant op.
202 static int64_t getSExtIntValueFromConstOp(mlir::Value val) {
203 auto constOp = val.getDefiningOp<cir::ConstantOp>();
204 assert(constOp && "getSExtIntValueFromConstOp call with non ConstantOp");
205 return constOp.getIntValue().getSExtValue();
206 }
207
208 /// Get zero-extended integer from a mlir::Value that is an int constant or a
209 /// constant op.
210 static int64_t getZExtIntValueFromConstOp(mlir::Value val) {
211 auto constOp = val.getDefiningOp<cir::ConstantOp>();
212 assert(constOp && "getZExtIntValueFromConstOp call with non ConstantOp");
213 return constOp.getIntValue().getZExtValue();
214 }
215
216 /// Return the cir::TypeEvaluationKind of QualType \c type.
218
222
226
228 bool suppressNewContext = false);
230
231 CIRGenTypes &getTypes() const { return cgm.getTypes(); }
232
233 const TargetInfo &getTarget() const { return cgm.getTarget(); }
234 mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }
235
237 return cgm.getTargetCIRGenInfo();
238 }
239
240 // ---------------------
241 // Opaque value handling
242 // ---------------------
243
244 /// Keeps track of the current set of opaque value expressions.
245 llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
246 llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
247
248 // This keeps track of the associated size for each VLA type.
249 // We track this by the size expression rather than the type itself because
250 // in certain situations, like a const qualifier applied to an VLA typedef,
251 // multiple VLA types can share the same size expression.
252 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
253 // enter/leave scopes.
254 llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
255
256public:
257 /// A non-RAII class containing all the information about a bound
258 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
259 /// this which makes individual mappings very simple; using this
260 /// class directly is useful when you have a variable number of
261 /// opaque values or don't want the RAII functionality for some
262 /// reason.
263 class OpaqueValueMappingData {
264 const OpaqueValueExpr *opaqueValue;
265 bool boundLValue;
266
267 OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
268 : opaqueValue(ov), boundLValue(boundLValue) {}
269
270 public:
271 OpaqueValueMappingData() : opaqueValue(nullptr) {}
272
273 static bool shouldBindAsLValue(const Expr *expr) {
274 // gl-values should be bound as l-values for obvious reasons.
275 // Records should be bound as l-values because IR generation
276 // always keeps them in memory. Expressions of function type
277 // act exactly like l-values but are formally required to be
278 // r-values in C.
279 return expr->isGLValue() || expr->getType()->isFunctionType() ||
281 }
282
284 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
285 if (shouldBindAsLValue(ov))
286 return bind(cgf, ov, cgf.emitLValue(e));
287 return bind(cgf, ov, cgf.emitAnyExpr(e));
288 }
289
291 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
292 assert(shouldBindAsLValue(ov));
293 cgf.opaqueLValues.insert(std::make_pair(ov, lv));
294 return OpaqueValueMappingData(ov, true);
295 }
296
298 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
299 assert(!shouldBindAsLValue(ov));
300 cgf.opaqueRValues.insert(std::make_pair(ov, rv));
301
302 OpaqueValueMappingData data(ov, false);
303
304 // Work around an extremely aggressive peephole optimization in
305 // EmitScalarConversion which assumes that all other uses of a
306 // value are extant.
308 return data;
309 }
310
311 bool isValid() const { return opaqueValue != nullptr; }
312 void clear() { opaqueValue = nullptr; }
313
315 assert(opaqueValue && "no data to unbind!");
316
317 if (boundLValue) {
318 cgf.opaqueLValues.erase(opaqueValue);
319 } else {
320 cgf.opaqueRValues.erase(opaqueValue);
322 }
323 }
324 };
325
326 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
328 CIRGenFunction &cgf;
330
331 public:
335
336 /// Build the opaque value mapping for the given conditional
337 /// operator if it's the GNU ?: extension. This is a common
338 /// enough pattern that the convenience operator is really
339 /// helpful.
340 ///
343 : cgf(cgf) {
344 if (mlir::isa<ConditionalOperator>(op))
345 // Leave Data empty.
346 return;
347
349 mlir::cast<BinaryConditionalOperator>(op);
351 e->getCommon());
352 }
353
354 /// Build the opaque value mapping for an OpaqueValueExpr whose source
355 /// expression is set to the expression the OVE represents.
357 : cgf(cgf) {
358 if (ov) {
359 assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
360 "for OVE with no source expression");
361 data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
362 }
363 }
364
366 LValue lvalue)
367 : cgf(cgf),
368 data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}
369
371 RValue rvalue)
372 : cgf(cgf),
373 data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}
374
375 void pop() {
376 data.unbind(cgf);
377 data.clear();
378 }
379
381 if (data.isValid())
382 data.unbind(cgf);
383 }
384 };
385
386private:
387 /// Declare a variable in the current scope, return success if the variable
388 /// wasn't declared yet.
389 void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
390 mlir::Location loc, clang::CharUnits alignment,
391 bool isParam = false);
392
393public:
394 mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);
395
396 void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);
397
398private:
399 // Track current variable initialization (if there's one)
400 const clang::VarDecl *currVarDecl = nullptr;
401 class VarDeclContext {
403 const clang::VarDecl *oldVal = nullptr;
404
405 public:
406 VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) {
407 if (p.currVarDecl)
408 oldVal = p.currVarDecl;
409 p.currVarDecl = value;
410 }
411
412 /// Can be used to restore the state early, before the dtor
413 /// is run.
414 void restore() { p.currVarDecl = oldVal; }
415 ~VarDeclContext() { restore(); }
416 };
417
418public:
419 /// Use to track source locations across nested visitor traversals.
420 /// Always use a `SourceLocRAIIObject` to change currSrcLoc.
421 std::optional<mlir::Location> currSrcLoc;
423 CIRGenFunction &cgf;
424 std::optional<mlir::Location> oldLoc;
425
426 public:
427 SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value) : cgf(cgf) {
428 if (cgf.currSrcLoc)
429 oldLoc = cgf.currSrcLoc;
430 cgf.currSrcLoc = value;
431 }
432
433 /// Can be used to restore the state early, before the dtor
434 /// is run.
435 void restore() { cgf.currSrcLoc = oldLoc; }
437 };
438
440 llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
441
442 /// Hold counters for incrementally naming temporaries
443 unsigned counterRefTmp = 0;
444 unsigned counterAggTmp = 0;
445 std::string getCounterRefTmpAsString();
446 std::string getCounterAggTmpAsString();
447
448 /// Helpers to convert Clang's SourceLocation to a MLIR Location.
449 mlir::Location getLoc(clang::SourceLocation srcLoc);
450 mlir::Location getLoc(clang::SourceRange srcLoc);
451 mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
452
453 const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
454
455 /// True if an insertion point is defined. If not, this indicates that the
456 /// current code being emitted is unreachable.
457 /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
458 /// since we don't yet force null insertion point to designate behavior (like
459 /// LLVM's codegen does) and we probably shouldn't.
460 bool haveInsertPoint() const {
461 return builder.getInsertionBlock() != nullptr;
462 }
463
464 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
465 // an ObjCMethodDecl.
467 llvm::PointerUnion<const clang::FunctionProtoType *,
468 const clang::ObjCMethodDecl *>
470
473 };
474
476
477 /// An abstract representation of regular/ObjC call/message targets.
479 /// The function declaration of the callee.
480 [[maybe_unused]] const clang::Decl *calleeDecl;
481
482 public:
483 AbstractCallee() : calleeDecl(nullptr) {}
484 AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {}
485
486 bool hasFunctionDecl() const {
487 return llvm::isa_and_nonnull<clang::FunctionDecl>(calleeDecl);
488 }
489
490 const clang::Decl *getDecl() const { return calleeDecl; }
491
492 unsigned getNumParams() const {
493 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
494 return fd->getNumParams();
495 return llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_size();
496 }
497
498 const clang::ParmVarDecl *getParamDecl(unsigned I) const {
499 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
500 return fd->getParamDecl(I);
501 return *(llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_begin() +
502 I);
503 }
504 };
505
506 struct VlaSizePair {
507 mlir::Value numElts;
509
510 VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
511 };
512
513 /// Return the number of elements for a single dimension
514 /// for the given array type.
515 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
516
517 /// Returns an MLIR::Value+QualType pair that corresponds to the size,
518 /// in non-variably-sized elements, of a variable length array type,
519 /// plus that largest non-variably-sized element type. Assumes that
520 /// the type has already been emitted with emitVariablyModifiedType.
521 VlaSizePair getVLASize(const VariableArrayType *type);
522 VlaSizePair getVLASize(QualType type);
523
525
526 mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType) {
527 return getAsNaturalAddressOf(addr, pointeeType).getBasePointer();
528 }
529
530 void finishFunction(SourceLocation endLoc);
531
532 /// Determine whether the given initializer is trivial in the sense
533 /// that it requires no code to be generated.
534 bool isTrivialInitializer(const Expr *init);
535
536 /// If the specified expression does not fold to a constant, or if it does but
537 /// contains a label, return false. If it constant folds return true and set
538 /// the boolean result in Result.
539 bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
540 bool allowLabels = false);
542 llvm::APSInt &resultInt,
543 bool allowLabels = false);
544
545 /// Return true if the statement contains a label in it. If
546 /// this statement is not executed normally, it not containing a label means
547 /// that we can just remove the code.
548 bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);
549
550 Address emitExtVectorElementLValue(LValue lv, mlir::Location loc);
551
552 class ConstantEmission {
553 // Cannot use mlir::TypedAttr directly here because of bit availability.
554 llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference;
555 ConstantEmission(mlir::TypedAttr c, bool isReference)
556 : valueAndIsReference(c, isReference) {}
557
558 public:
560 static ConstantEmission forReference(mlir::TypedAttr c) {
561 return ConstantEmission(c, true);
562 }
563 static ConstantEmission forValue(mlir::TypedAttr c) {
564 return ConstantEmission(c, false);
565 }
566
567 explicit operator bool() const {
568 return valueAndIsReference.getOpaqueValue() != nullptr;
569 }
570
571 bool isReference() const { return valueAndIsReference.getInt(); }
573 assert(isReference());
574 cgf.cgm.errorNYI(refExpr->getSourceRange(),
575 "ConstantEmission::getReferenceLValue");
576 return {};
577 }
578
579 mlir::TypedAttr getValue() const {
580 assert(!isReference());
581 return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer());
582 }
583 };
584
585 ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
586 ConstantEmission tryEmitAsConstant(const MemberExpr *me);
587
590 /// The address of the alloca for languages with explicit address space
591 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
592 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
593 /// as a global constant.
595
596 /// True if the variable is of aggregate type and has a constant
597 /// initializer.
599
600 /// True if the variable is a __block variable that is captured by an
601 /// escaping block.
602 bool isEscapingByRef = false;
603
604 /// True if the variable was emitted as an offload recipe, and thus doesn't
605 /// have the same sort of alloca initialization.
606 bool emittedAsOffload = false;
607
608 mlir::Value nrvoFlag{};
609
610 struct Invalid {};
612
615
617
618 bool wasEmittedAsGlobal() const { return !addr.isValid(); }
619
621
622 /// Returns the raw, allocated address, which is not necessarily
623 /// the address of the object itself. It is casted to default
624 /// address space for address space agnostic languages.
625 Address getAllocatedAddress() const { return addr; }
626
627 // Changes the stored address for the emission. This function should only
628 // be used in extreme cases, and isn't required to model normal AST
629 // initialization/variables.
631
632 /// Returns the address of the object within this declaration.
633 /// Note that this does not chase the forwarding pointer for
634 /// __block decls.
636 if (!isEscapingByRef)
637 return addr;
638
640 return Address::invalid();
641 }
642 };
643
644 /// IndirectBranch - The first time an indirect goto is seen we create a block
645 /// reserved for the indirect branch. Unlike before, the actual 'indirectbr'
646 /// is emitted at the end of the function, once all block destinations have
647 /// been resolved.
648 mlir::Block *indirectGotoBlock = nullptr;
649
652
653 /// Perform the usual unary conversions on the specified expression and
654 /// compare the result against zero, returning an Int1Ty value.
655 mlir::Value evaluateExprAsBool(const clang::Expr *e);
656
657 cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d,
658 cir::GlobalOp gv,
659 cir::GetGlobalOp gvAddr);
660
661 /// Enter the cleanups necessary to complete the given phase of destruction
662 /// for a destructor. The end result should call destructors on members and
663 /// base classes in reverse order of their construction.
665
666 /// Determines whether an EH cleanup is required to destroy a type
667 /// with the given destruction kind.
668 /// TODO(cir): could be shared with Clang LLVM codegen
670 switch (kind) {
672 return false;
676 return getLangOpts().Exceptions;
678 return getLangOpts().Exceptions &&
679 cgm.getCodeGenOpts().ObjCAutoRefCountExceptions;
680 }
681 llvm_unreachable("bad destruction kind");
682 }
683
687
689
690 /// Set the address of a local variable.
692 assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
693 localDeclMap.insert({vd, addr});
694
695 // Add to the symbol table if not there already.
696 if (symbolTable.count(vd))
697 return;
698 symbolTable.insert(vd, addr.getPointer());
699 }
700
701 // Replaces the address of the local variable, if it exists. Else does the
702 // same thing as setAddrOfLocalVar.
704 localDeclMap.insert_or_assign(vd, addr);
705 }
706
707 // A class to allow reverting changes to a var-decl's registration to the
708 // localDeclMap. This is used in cases where things are being inserted into
709 // the variable list but don't follow normal lookup/search rules, like in
710 // OpenACC recipe generation.
712 CIRGenFunction &cgf;
713 const VarDecl *vd;
714 bool shouldDelete = false;
715 Address oldAddr = Address::invalid();
716
717 public:
719 : cgf(cgf), vd(vd) {
720 auto mapItr = cgf.localDeclMap.find(vd);
721
722 if (mapItr != cgf.localDeclMap.end())
723 oldAddr = mapItr->second;
724 else
725 shouldDelete = true;
726 }
727
729 if (shouldDelete)
730 cgf.localDeclMap.erase(vd);
731 else
732 cgf.localDeclMap.insert_or_assign(vd, oldAddr);
733 }
734 };
735
737
740
741 static bool
743
750
753
757 const clang::CXXRecordDecl *nearestVBase,
758 clang::CharUnits offsetFromNearestVBase,
759 bool baseIsNonVirtualPrimaryBase,
760 const clang::CXXRecordDecl *vtableClass,
761 VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
762 /// Return the Value of the vtable pointer member pointed to by thisAddr.
763 mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
764 const clang::CXXRecordDecl *vtableClass);
765
766 /// Returns whether we should perform a type checked load when loading a
767 /// virtual function for virtual calls to members of RD. This is generally
768 /// true when both vcall CFI and whole-program-vtables are enabled.
770
771 /// Source location information about the default argument or member
772 /// initializer expression we're evaluating, if any.
776
777 /// A scope within which we are constructing the fields of an object which
778 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
779 /// we need to evaluate the CXXDefaultInitExpr within the evaluation.
781 public:
783 : cgf(cgf), oldCXXDefaultInitExprThis(cgf.cxxDefaultInitExprThis) {
784 cgf.cxxDefaultInitExprThis = thisAddr;
785 }
787 cgf.cxxDefaultInitExprThis = oldCXXDefaultInitExprThis;
788 }
789
790 private:
791 CIRGenFunction &cgf;
792 Address oldCXXDefaultInitExprThis;
793 };
794
795 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
796 /// is overridden to be the object under construction.
798 public:
803 cgf.cxxThisValue = cgf.cxxDefaultInitExprThis.getPointer();
804 cgf.cxxThisAlignment = cgf.cxxDefaultInitExprThis.getAlignment();
805 }
807 cgf.cxxThisValue = oldCXXThisValue;
808 cgf.cxxThisAlignment = oldCXXThisAlignment;
809 }
810
811 public:
813 mlir::Value oldCXXThisValue;
816 };
817
822
824 LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
825
826 /// Construct an address with the natural alignment of T. If a pointer to T
827 /// is expected to be signed, the pointer passed to this function must have
828 /// been signed, and the returned Address will have the pointer authentication
829 /// information needed to authenticate the signed pointer.
831 CharUnits alignment,
832 bool forPointeeType = false,
833 LValueBaseInfo *baseInfo = nullptr) {
834 if (alignment.isZero())
835 alignment = cgm.getNaturalTypeAlignment(t, baseInfo);
836 return Address(ptr, convertTypeForMem(t), alignment);
837 }
838
840 Address value, const CXXRecordDecl *derived,
841 llvm::iterator_range<CastExpr::path_const_iterator> path,
842 bool nullCheckValue, SourceLocation loc);
843
845 mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived,
846 llvm::iterator_range<CastExpr::path_const_iterator> path,
847 bool nullCheckValue);
848
849 /// Return the VTT parameter that should be passed to a base
850 /// constructor/destructor with virtual bases.
851 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
852 /// to ItaniumCXXABI.cpp together with all the references to VTT.
853 mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase,
854 bool delegating);
855
858 return makeAddrLValue(addr, ty, LValueBaseInfo(source));
859 }
860
862 return LValue::makeAddr(addr, ty, baseInfo);
863 }
864
865 void initializeVTablePointers(mlir::Location loc,
866 const clang::CXXRecordDecl *rd);
867 void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
868
870
871 /// Return the address of a local variable.
873 auto it = localDeclMap.find(vd);
874 assert(it != localDeclMap.end() &&
875 "Invalid argument to getAddrOfLocalVar(), no decl!");
876 return it->second;
877 }
878
880 mlir::Type fieldType, unsigned index);
881
882 /// Given an opaque value expression, return its LValue mapping if it exists,
883 /// otherwise create one.
885
886 /// Given an opaque value expression, return its RValue mapping if it exists,
887 /// otherwise create one.
889
890 /// Load the value for 'this'. This function is only valid while generating
891 /// code for an C++ member function.
892 /// FIXME(cir): this should return a mlir::Value!
893 mlir::Value loadCXXThis() {
894 assert(cxxThisValue && "no 'this' value for this function");
895 return cxxThisValue;
896 }
898
899 /// Load the VTT parameter to base constructors/destructors have virtual
900 /// bases. FIXME: Every place that calls LoadCXXVTT is something that needs to
901 /// be abstracted properly.
902 mlir::Value loadCXXVTT() {
903 assert(cxxStructorImplicitParamValue && "no VTT value for this function");
905 }
906
907 /// Convert the given pointer to a complete class to the given direct base.
909 Address value,
910 const CXXRecordDecl *derived,
911 const CXXRecordDecl *base,
912 bool baseIsVirtual);
913
914 /// Determine whether a return value slot may overlap some other object.
916 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
917 // class subobjects. These cases may need to be revisited depending on the
918 // resolution of the relevant core issue.
920 }
921
922 /// Determine whether a base class initialization may overlap some other
923 /// object.
925 const CXXRecordDecl *baseRD,
926 bool isVirtual);
927
928 /// Get an appropriate 'undef' rvalue for the given type.
929 /// TODO: What's the equivalent for MLIR? Currently we're only using this for
930 /// void types so it just returns RValue::get(nullptr) but it'll need
931 /// addressed later.
933
934 cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
935 cir::FuncType funcType);
936
938 FunctionArgList &args);
939
940 /// Emit the function prologue: declare function arguments in the symbol
941 /// table.
942 void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB,
943 const FunctionDecl *fd, SourceLocation bodyBeginLoc);
944
945 /// Emit code for the start of a function.
946 /// \param loc The location to be associated with the function.
947 /// \param startLoc The location of the function body.
949 cir::FuncOp fn, cir::FuncType funcType,
951 clang::SourceLocation startLoc);
952
953 /// returns true if aggregate type has a volatile member.
955 if (const auto *rd = t->getAsRecordDecl())
956 return rd->hasVolatileMember();
957 return false;
958 }
959
960 void addCatchHandlerAttr(const CXXCatchStmt *catchStmt,
961 SmallVector<mlir::Attribute> &handlerAttrs);
962
963 /// The cleanup depth enclosing all the cleanups associated with the
964 /// parameters.
966
968
969 /// Takes the old cleanup stack size and emits the cleanup blocks
970 /// that have been added.
971 void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth,
972 ArrayRef<mlir::Value *> valuesToReload = {});
973 void popCleanupBlock();
974
975 /// Deactivates the given cleanup block. The block cannot be reactivated. Pops
976 /// it if it's the top of the stack.
977 ///
978 /// \param DominatingIP - An instruction which is known to
979 /// dominate the current IP (if set) and which lies along
980 /// all paths of execution between the current IP and the
981 /// the point at which the cleanup comes into scope.
983 mlir::Operation *dominatingIP);
984
985 /// Push a cleanup to be run at the end of the current full-expression. Safe
986 /// against the possibility that we're currently inside a
987 /// conditionally-evaluated expression.
988 template <class T, class... As>
990 // If we're not in a conditional branch, or if none of the
991 // arguments requires saving, then use the unconditional cleanup.
993 return ehStack.pushCleanup<T>(kind, a...);
994
995 cgm.errorNYI("pushFullExprCleanup in conditional branch");
996 }
997
998 /// Enters a new scope for capturing cleanups, all of which
999 /// will be executed once the scope is exited.
1000 class RunCleanupsScope {
1001 EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
1002
1003 protected:
1006
1007 private:
1008 RunCleanupsScope(const RunCleanupsScope &) = delete;
1009 void operator=(const RunCleanupsScope &) = delete;
1010
1011 protected:
1013
1014 public:
1015 /// Enter a new cleanup scope.
1017 : performCleanup(true), cgf(cgf) {
1018 cleanupStackDepth = cgf.ehStack.stable_begin();
1019 oldDidCallStackSave = cgf.didCallStackSave;
1020 cgf.didCallStackSave = false;
1021 oldCleanupStackDepth = cgf.currentCleanupStackDepth;
1022 cgf.currentCleanupStackDepth = cleanupStackDepth;
1023 }
1024
1025 /// Exit this cleanup scope, emitting any accumulated cleanups.
1027 if (performCleanup)
1028 forceCleanup();
1029 }
1030
1031 /// Force the emission of cleanups now, instead of waiting
1032 /// until this object is destroyed.
1033 void forceCleanup(ArrayRef<mlir::Value *> valuesToReload = {}) {
1034 assert(performCleanup && "Already forced cleanup");
1036 cgf.popCleanupBlocks(cleanupStackDepth, valuesToReload);
1037 performCleanup = false;
1038 cgf.currentCleanupStackDepth = oldCleanupStackDepth;
1039 }
1040 };
1041
1042 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1044
1045public:
1046 /// Represents a scope, including function bodies, compound statements, and
1047 /// the substatements of if/while/do/for/switch/try statements. This class
1048 /// handles any automatic cleanup, along with the return value.
1049 struct LexicalScope : public RunCleanupsScope {
1050 private:
1051 // Block containing cleanup code for things initialized in this
1052 // lexical context (scope).
1053 mlir::Block *cleanupBlock = nullptr;
1054
1055 // Points to the scope entry block. This is useful, for instance, for
1056 // helping to insert allocas before finalizing any recursive CodeGen from
1057 // switches.
1058 mlir::Block *entryBlock;
1059
1060 LexicalScope *parentScope = nullptr;
1061
1062 // Holds the actual value for ScopeKind::Try
1063 cir::TryOp tryOp = nullptr;
1064
1065 // On a coroutine body, the OnFallthrough sub stmt holds the handler
1066 // (CoreturnStmt) for control flow falling off the body. Keep track
1067 // of emitted co_return in this scope and allow OnFallthrough to be
1068 // skipped.
1069 bool hasCoreturnStmt = false;
1070
1071 // Only Regular is used at the moment. Support for other kinds will be
1072 // added as the relevant statements/expressions are upstreamed.
1073 enum Kind {
1074 Regular, // cir.if, cir.scope, if_regions
1075 Ternary, // cir.ternary
1076 Switch, // cir.switch
1077 Try, // cir.try
1078 GlobalInit // cir.global initialization code
1079 };
1080 Kind scopeKind = Kind::Regular;
1081
1082 // The scope return value.
1083 mlir::Value retVal = nullptr;
1084
1085 mlir::Location beginLoc;
1086 mlir::Location endLoc;
1087
1088 public:
1089 unsigned depth = 0;
1090
/// Open a new lexical scope: chain it to the enclosing scope, make it the
/// function's current scope (cgf.curLexScope), and derive the begin/end
/// source locations from \p loc.
///
/// \param cgf the function being emitted; also feeds the RunCleanupsScope base
/// \param loc the scope's location; a two-element FusedLoc is split into
///        distinct begin/end locations
/// \param eb the scope's entry block (must be non-null)
1091 LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
1092 : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
1093 beginLoc(loc), endLoc(loc) {
1094
1095 assert(entryBlock && "LexicalScope requires an entry block");
1096 cgf.curLexScope = this;
// NOTE(review): depth becomes 1 for any non-root scope; it does not
// accumulate across nesting levels (never parentScope->depth + 1) —
// confirm this is intended by current users of `depth`.
1097 if (parentScope)
1098 ++depth;
1099
// A FusedLoc with exactly two locations encodes the scope's begin and
// end; split it so each bound of the scope carries its own location.
1100 if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
1101 assert(fusedLoc.getLocations().size() == 2 && "too many locations");
1102 beginLoc = fusedLoc.getLocations()[0];
1103 endLoc = fusedLoc.getLocations()[1];
1104 }
1105 }
1106
/// Record the value this scope should produce as its result.
1107 void setRetVal(mlir::Value v) { retVal = v; }
1108
/// Emit this scope's accumulated cleanup code (defined out of line).
1109 void cleanup();
/// Pop this scope: make the parent scope current again.
1110 void restore() { cgf.curLexScope = parentScope; }
1111
1114 cleanup();
1115 restore();
1116 }
1117
1118 // ---
1119 // Coroutine tracking
1120 // ---
/// Whether a co_return was emitted in this scope (lets the coroutine
/// OnFallthrough handler be skipped).
1121 bool hasCoreturn() const { return hasCoreturnStmt; }
1122 void setCoreturn() { hasCoreturnStmt = true; }
1123
1124 // ---
1125 // Kind
1126 // ---
// Query/set the scope kind; see the Kind enum above for what each maps to.
1127 bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
1128 bool isRegular() { return scopeKind == Kind::Regular; }
1129 bool isSwitch() { return scopeKind == Kind::Switch; }
1130 bool isTernary() { return scopeKind == Kind::Ternary; }
1131 bool isTry() { return scopeKind == Kind::Try; }
/// Walk up the scope chain and return the nearest enclosing cir.try, if any
/// (defined out of line).
1132 cir::TryOp getClosestTryParent();
1133 void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
1134 void setAsSwitch() { scopeKind = Kind::Switch; }
1135 void setAsTernary() { scopeKind = Kind::Ternary; }
/// Mark this scope as a try scope and remember the backing cir.try op.
1136 void setAsTry(cir::TryOp op) {
1137 scopeKind = Kind::Try;
1138 tryOp = op;
1139 }
1140
1141 // Lazy create cleanup block or return what's available.
1142 mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
1143 if (cleanupBlock)
1144 return cleanupBlock;
1145 cleanupBlock = createCleanupBlock(builder);
1146 return cleanupBlock;
1147 }
1148
/// The cir.try op backing this scope. Only valid for Try scopes.
1149 cir::TryOp getTry() {
1150 assert(isTry());
1151 return tryOp;
1152 }
1153
/// Return the cleanup block if one has been created, null otherwise.
/// NOTE(review): \p builder is unused here — presumably kept for signature
/// symmetry with getOrCreateCleanupBlock; confirm before removing.
1154 mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
1155 return cleanupBlock;
1156 }
1157
/// Create a detached cleanup block in the current region (or in the
/// function's first region when the builder has no insertion block) and
/// cache it as this scope's cleanup block. The builder's insertion point
/// is preserved.
1158 mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
// Create the cleanup block but don't hook it up just yet.
1160 mlir::OpBuilder::InsertionGuard guard(builder);
1161 mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
1162 : &cgf.curFn->getRegion(0);
1163 cleanupBlock = builder.createBlock(r);
1164 return cleanupBlock;
1165 }
1166
1167 // ---
1168 // Return handling.
1169 // ---
1170
1171 private:
1172 // On switches we need one return block per region, since cases don't
1173 // have their own scopes but are distinct regions nonetheless.
1174
1175 // TODO: This implementation should change once we have support for early
1176 // exits in MLIR structured control flow (llvm-project#161575)
1178 llvm::DenseMap<mlir::Block *, mlir::Location> retLocs;
1179 llvm::DenseMap<cir::CaseOp, unsigned> retBlockInCaseIndex;
1180 std::optional<unsigned> normalRetBlockIndex;
1181
1182 // There's usually only one ret block per scope, but this needs to be
1183 // get-or-create because of potential unreachable return statements. Note
1184 // that for those, all source locations map to the first one found.
1185 mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1186 assert((isa_and_nonnull<cir::CaseOp>(
1187 cgf.builder.getBlock()->getParentOp()) ||
1188 retBlocks.size() == 0) &&
1189 "only switches can hold more than one ret block");
1190
1191 // Create the return block but don't hook it up just yet.
1192 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
1193 auto *b = cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
1194 retBlocks.push_back(b);
1195 updateRetLoc(b, loc);
1196 return b;
1197 }
1198
1199 cir::ReturnOp emitReturn(mlir::Location loc);
1200 void emitImplicitReturn();
1201
1202 public:
/// Source location recorded for return block \p b; \p b must already be in
/// retLocs (i.e. it was created via createRetBlock).
1204 mlir::Location getRetLoc(mlir::Block *b) { return retLocs.at(b); }
/// Record (or overwrite) the source location associated with return block
/// \p b.
1205 void updateRetLoc(mlir::Block *b, mlir::Location loc) {
1206 retLocs.insert_or_assign(b, loc);
1207 }
1208
1209 mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1210 // Check if we're inside a case region
1211 if (auto caseOp = mlir::dyn_cast_if_present<cir::CaseOp>(
1212 cgf.builder.getBlock()->getParentOp())) {
1213 auto iter = retBlockInCaseIndex.find(caseOp);
1214 if (iter != retBlockInCaseIndex.end()) {
1215 // Reuse existing return block
1216 mlir::Block *ret = retBlocks[iter->second];
1217 updateRetLoc(ret, loc);
1218 return ret;
1219 }
1220 // Create new return block
1221 mlir::Block *ret = createRetBlock(cgf, loc);
1222 retBlockInCaseIndex[caseOp] = retBlocks.size() - 1;
1223 return ret;
1224 }
1225
1226 if (normalRetBlockIndex) {
1227 mlir::Block *ret = retBlocks[*normalRetBlockIndex];
1228 updateRetLoc(ret, loc);
1229 return ret;
1230 }
1231
1232 mlir::Block *ret = createRetBlock(cgf, loc);
1233 normalRetBlockIndex = retBlocks.size() - 1;
1234 return ret;
1235 }
1236
1237 mlir::Block *getEntryBlock() { return entryBlock; }
1238 };
1239
1241
1242 typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
1243
1245
1246 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
1247 QualType type);
1248
1250 Destroyer *destroyer);
1251
1253
1254 /// Start generating a thunk function.
1255 void startThunk(cir::FuncOp fn, GlobalDecl gd,
1256 const CIRGenFunctionInfo &fnInfo, bool isUnprototyped);
1257
1258 /// Finish generating a thunk function.
1259 void finishThunk();
1260
1261 /// Generate code for a thunk function.
1262 void generateThunk(cir::FuncOp fn, const CIRGenFunctionInfo &fnInfo,
1263 GlobalDecl gd, const ThunkInfo &thunk,
1264 bool isUnprototyped);
1265
1266 /// ----------------------
1267 /// CIR emit functions
1268 /// ----------------------
1269public:
1270 bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr,
1272 clang::SVETypeFlags typeFlags);
1273 mlir::Value emitSVEPredicateCast(mlir::Value pred, unsigned minNumElts,
1274 mlir::Location loc);
1275 std::optional<mlir::Value>
1276 emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
1278 llvm::Triple::ArchType arch);
1279 std::optional<mlir::Value> emitAArch64SMEBuiltinExpr(unsigned builtinID,
1280 const CallExpr *expr);
1281 std::optional<mlir::Value> emitAArch64SVEBuiltinExpr(unsigned builtinID,
1282 const CallExpr *expr);
1283
1284 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
1285 SourceLocation loc,
1286 SourceLocation assumptionLoc,
1287 int64_t alignment,
1288 mlir::Value offsetValue = nullptr);
1289
1290 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
1291 SourceLocation assumptionLoc,
1292 int64_t alignment,
1293 mlir::Value offsetValue = nullptr);
1294
1295private:
1296 void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
1297 clang::CharUnits alignment);
1298
1299 CIRGenCallee emitDirectCallee(const GlobalDecl &gd);
1300
1301public:
1303 llvm::StringRef fieldName,
1304 unsigned fieldIndex);
1305
1306 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1307 mlir::Location loc, clang::CharUnits alignment,
1308 bool insertIntoFnEntryBlock,
1309 mlir::Value arraySize = nullptr);
1310 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1311 mlir::Location loc, clang::CharUnits alignment,
1312 mlir::OpBuilder::InsertPoint ip,
1313 mlir::Value arraySize = nullptr);
1314
1315 void emitAggregateStore(mlir::Value value, Address dest);
1316
1317 void emitAggExpr(const clang::Expr *e, AggValueSlot slot);
1318
1320
1322
1323 /// Emit an aggregate copy.
1324 ///
1325 /// \param isVolatile \c true iff either the source or the destination is
1326 /// volatile.
1327 /// \param MayOverlap Whether the tail padding of the destination might be
1328 /// occupied by some other object. More efficient code can often be
1329 /// generated if not.
1330 void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
1331 AggValueSlot::Overlap_t mayOverlap,
1332 bool isVolatile = false);
1333
1334 /// Emit code to compute the specified expression which can have any type. The
1335 /// result is returned as an RValue struct. If this is an aggregate
1336 /// expression, the aggloc/agglocvolatile arguments indicate where the result
1337 /// should be returned.
1340 bool ignoreResult = false);
1341
1342 /// Emits the code necessary to evaluate an arbitrary expression into the
1343 /// given memory location.
1344 void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals,
1345 bool isInitializer);
1346
1347 /// Similarly to emitAnyExpr(), however, the result will always be accessible
1348 /// even if no aggregate location is provided.
1350
1351 void emitAnyExprToExn(const Expr *e, Address addr);
1352
1353 void emitArrayDestroy(mlir::Value begin, mlir::Value numElements,
1354 QualType elementType, CharUnits elementAlign,
1355 Destroyer *destroyer);
1356
1357 mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
1358 QualType &baseType, Address &addr);
1360
1362
1364 LValueBaseInfo *baseInfo = nullptr);
1365
1366 std::pair<mlir::Value, mlir::Type>
1368 QualType inputType, std::string &constraintString,
1369 SourceLocation loc);
1370 std::pair<mlir::Value, mlir::Type>
1371 emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr,
1372 std::string &constraintString);
1373 mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
1374
1376 void emitAtomicInit(Expr *init, LValue dest);
1377 void emitAtomicStore(RValue rvalue, LValue dest, bool isInit);
1378 void emitAtomicStore(RValue rvalue, LValue dest, cir::MemOrder order,
1379 bool isVolatile, bool isInit);
1381 const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
1382 llvm::function_ref<void(cir::MemOrder)> emitAtomicOp);
1383
1384 mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s);
1385
1386 AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
1387 mlir::OpBuilder::InsertPoint ip = {});
1388
1389 /// Emit code and set up symbol table for a variable declaration with auto,
1390 /// register, or no storage class specifier. These turn into simple stack
1391 /// objects, globals depending on target.
1392 void emitAutoVarDecl(const clang::VarDecl &d);
1393
1394 void emitAutoVarCleanups(const AutoVarEmission &emission);
1395 /// Emit the initializer for an allocated variable. If this call is not
1396 /// associated with the call to emitAutoVarAlloca (as the address of the
1397 /// emission is not directly an alloca), the allocatedSeparately parameter can
1398 /// be used to suppress the assertions. However, this should only be used in
1399 /// extreme cases, as it doesn't properly reflect the language/AST.
1400 void emitAutoVarInit(const AutoVarEmission &emission);
1401 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1403
1404 void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
1405
1406 void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
1407 CXXCtorInitializer *baseInit);
1408
1410
1411 mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
1412
1413 RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
1414 const clang::CallExpr *e, ReturnValueSlot returnValue);
1415
1416 /// Returns a Value corresponding to the size of the given expression by
1417 /// emitting a `cir.objsize` operation.
1418 ///
1419 /// \param e The expression whose object size to compute
1420 /// \param type Determines the semantics of the object size computation.
1421 /// The type parameter is a 2-bit value where:
1422 /// bit 0 (type & 1): 0 = whole object, 1 = closest subobject
1423 /// bit 1 (type & 2): 0 = maximum size, 2 = minimum size
1424 /// \param resType The result type for the size value
1425 /// \param emittedE Optional pre-emitted pointer value. If non-null, we'll
1426 /// call `cir.objsize` on this value rather than emitting e.
1427 /// \param isDynamic If true, allows runtime evaluation via dynamic mode
1428 mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type,
1429 cir::IntType resType, mlir::Value emittedE,
1430 bool isDynamic);
1431
1432 mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e,
1433 unsigned type,
1434 cir::IntType resType,
1435 mlir::Value emittedE,
1436 bool isDynamic);
1437
1438 int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts);
1439
1441
1442 RValue emitCall(const CIRGenFunctionInfo &funcInfo,
1443 const CIRGenCallee &callee, ReturnValueSlot returnValue,
1444 const CallArgList &args, cir::CIRCallOpInterface *callOp,
1445 mlir::Location loc);
1448 const CallArgList &args,
1449 cir::CIRCallOpInterface *callOrTryCall = nullptr) {
1450 assert(currSrcLoc && "source location must have been set");
1451 return emitCall(funcInfo, callee, returnValue, args, callOrTryCall,
1452 *currSrcLoc);
1453 }
1454
1455 RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
1457
1458 /// Emit the call and return for a thunk function.
1459 void emitCallAndReturnForThunk(cir::FuncOp callee, const ThunkInfo *thunk,
1460 bool isUnprototyped);
1461
1462 void emitCallArg(CallArgList &args, const clang::Expr *e,
1463 clang::QualType argType);
1464 void emitCallArgs(
1465 CallArgList &args, PrototypeWrapper prototype,
1466 llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
1467 AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
1472
1473 template <typename T>
1474 mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
1475 mlir::ArrayAttr value,
1476 cir::CaseOpKind kind,
1477 bool buildingTopLevelCase);
1478
1480
1481 mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
1482 mlir::Type condType,
1483 bool buildingTopLevelCase);
1484
1485 LValue emitCastLValue(const CastExpr *e);
1486
1487 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
1488 /// sanitizer is enabled, a runtime check is also emitted.
1489 mlir::Value emitCheckedArgForAssume(const Expr *e);
1490
1491 /// Emit a conversion from the specified complex type to the specified
1492 /// destination type, where the destination type is an LLVM scalar type.
1493 mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
1494 QualType dstTy, SourceLocation loc);
1495
1498
1500
1501 mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
1502 cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1503 cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1504 cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
1505 cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
1506 mlir::Value coroframeAddr);
1508
1509 void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
1510
1512
1513 mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
1514
1515 mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s);
1516
1518 AggValueSlot dest);
1519
1522 Address arrayBegin, const CXXConstructExpr *e,
1523 bool newPointerIsChecked,
1524 bool zeroInitialize = false);
1526 mlir::Value numElements, Address arrayBase,
1527 const CXXConstructExpr *e,
1528 bool newPointerIsChecked,
1529 bool zeroInitialize);
1531 clang::CXXCtorType type, bool forVirtualBase,
1532 bool delegating, AggValueSlot thisAVS,
1533 const clang::CXXConstructExpr *e);
1534
1536 clang::CXXCtorType type, bool forVirtualBase,
1537 bool delegating, Address thisAddr,
1539
1540 void emitCXXDeleteExpr(const CXXDeleteExpr *e);
1541
1543 bool forVirtualBase, bool delegating,
1544 Address thisAddr, QualType thisTy);
1545
1547 mlir::Value thisVal, QualType thisTy,
1548 mlir::Value implicitParam,
1549 QualType implicitParamTy, const CallExpr *e);
1550
1551 mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
1553
1556
1558 const Expr *e, Address base, mlir::Value memberPtr,
1559 const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo);
1560
1562 const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
1563 ReturnValueSlot returnValue, mlir::Value thisPtr,
1564 mlir::Value implicitParam, clang::QualType implicitParamTy,
1565 const clang::CallExpr *ce, CallArgList *rtlArgs);
1566
1568 const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
1569 ReturnValueSlot returnValue, bool hasQualifier,
1570 clang::NestedNameSpecifier qualifier, bool isArrow,
1571 const clang::Expr *base);
1572
1575
1576 mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
1577
1578 void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
1579 mlir::Type elementTy, Address beginPtr,
1580 mlir::Value numElements,
1581 mlir::Value allocSizeWithoutCookie);
1582
1583 /// Create a check for a function parameter that may potentially be
1584 /// declared as non-null.
1585 void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc,
1586 AbstractCallee ac, unsigned paramNum);
1587
1589 const CXXMethodDecl *md,
1591
1594
1596
1598 const CallExpr *callExpr,
1600
1601 void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType,
1602 Address ptr);
1603
1604 void emitCXXThrowExpr(const CXXThrowExpr *e);
1605
1606 mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
1607
1609 clang::CXXCtorType ctorType, FunctionArgList &args);
1610
1611 // It's important not to confuse this and emitDelegateCXXConstructorCall.
1612 // Delegating constructors are the C++11 feature. The constructor delegate
1613 // optimization is used to reduce duplication in the base and complete
1614 // constructors where they are substantially the same.
1616 const FunctionArgList &args);
1617
1618 void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr,
1619 QualType deleteTy);
1620
1621 mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
1622
1623 mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e);
1624 mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
1625
1626 /// Emit an expression as an initializer for an object (variable, field, etc.)
1627 /// at the given location. The expression is not necessarily the normal
1628 /// initializer for the object, and the address is not necessarily
1629 /// its normal location.
1630 ///
1631 /// \param init the initializing expression
1632 /// \param d the object to act as if we're initializing
1633 /// \param lvalue the lvalue to initialize
1634 /// \param capturedByInit true if \p d is a __block variable whose address is
1635 /// potentially changed by the initializer
1636 void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
1637 LValue lvalue, bool capturedByInit = false);
1638
1639 mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
1640
1641 mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
1642
1643 mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s);
1644
1646
1648 clang::Expr *init);
1649
1651
1652 mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);
1653
1654 mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
1655
1656 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
1657
1658 void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty);
1659
1660 mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee,
1662 mlir::NamedAttrList attrs = {});
1663
1664 void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc);
1665
1666 /// Emit the computation of the specified expression of scalar type.
1667 mlir::Value emitScalarExpr(const clang::Expr *e,
1668 bool ignoreResultAssign = false);
1669
1670 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
1671 cir::UnaryOpKind kind, bool isPre);
1672
1673 /// Build a debug stoppoint if we are emitting debug info.
1674 void emitStopPoint(const Stmt *s);
1675
1676 // Build CIR for a statement. useCurrentScope should be true if no
1677 // new scopes need be created when finding a compound statement.
1678 mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope,
1679 llvm::ArrayRef<const Attr *> attrs = {});
1680
1681 mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
1682 bool useCurrentScope);
1683
1684 mlir::LogicalResult emitForStmt(const clang::ForStmt &s);
1685
1686 void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator,
1687 CallArgList &callArgs);
1688
1689 RValue emitCoawaitExpr(const CoawaitExpr &e,
1690 AggValueSlot aggSlot = AggValueSlot::ignored(),
1691 bool ignoreResult = false);
1692
1693 RValue emitCoyieldExpr(const CoyieldExpr &e,
1694 AggValueSlot aggSlot = AggValueSlot::ignored(),
1695 bool ignoreResult = false);
1696 /// Emit the computation of the specified expression of complex type,
1697 /// returning the result.
1698 mlir::Value emitComplexExpr(const Expr *e);
1699
1700 void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit);
1701
1702 mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv,
1703 cir::UnaryOpKind op, bool isPre);
1704
1705 LValue emitComplexAssignmentLValue(const BinaryOperator *e);
1706 LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
1707 LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e,
1708 mlir::Value &result);
1709
1710 mlir::LogicalResult
1711 emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
1712 AggValueSlot slot = AggValueSlot::ignored());
1713
1714 mlir::LogicalResult
1716 Address *lastValue = nullptr,
1717 AggValueSlot slot = AggValueSlot::ignored());
1718
1719 void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
1720 mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
1721 LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
1722
1723 mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
1724 mlir::Type condType,
1725 bool buildingTopLevelCase);
1726
1728 clang::CXXCtorType ctorType,
1729 const FunctionArgList &args,
1731
1732 /// We are performing a delegate call; that is, the current function is
1733 /// delegating to another one. Produce a r-value suitable for passing the
1734 /// given parameter.
1735 void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
1737
1738 /// Emit an `if` on a boolean condition to the specified blocks.
1739 /// FIXME: Based on the condition, this might try to simplify the codegen of
1740 /// the conditional based on the branch.
1741 /// In the future, we may apply code generation simplifications here,
1742 /// similar to those used in classic LLVM codegen
1743 /// See `EmitBranchOnBoolExpr` for inspiration.
1744 mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
1745 const clang::Stmt *thenS,
1746 const clang::Stmt *elseS);
1747 cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
1748 BuilderCallbackRef thenBuilder,
1749 mlir::Location thenLoc,
1750 BuilderCallbackRef elseBuilder,
1751 std::optional<mlir::Location> elseLoc = {});
1752
1753 mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
1754
1755 LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e);
1756
1757 mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
1758 mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
1759
1760 void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md);
1761 void emitLambdaStaticInvokeBody(const CXXMethodDecl *md);
1762
1763 mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
1764
1765 /// Emit code to compute the specified expression,
1766 /// ignoring the result.
1767 void emitIgnoredExpr(const clang::Expr *e);
1768
1769 RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc);
1770
1771 /// Load a complex number from the specified l-value.
1772 mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc);
1773
1774 RValue emitLoadOfExtVectorElementLValue(LValue lv);
1775
1776 /// Given an expression that represents a value lvalue, this method emits
1777 /// the address of the lvalue, then loads the result as an rvalue,
1778 /// returning the rvalue.
1779 RValue emitLoadOfLValue(LValue lv, SourceLocation loc);
1780
1781 Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
1782 LValueBaseInfo *pointeeBaseInfo);
1783 LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
1784 QualType refTy, AlignmentSource source);
1785
1786 /// EmitLoadOfScalar - Load a scalar value from an address, taking
1787 /// care to appropriately convert from the memory representation to
1788 /// the LLVM value representation. The l-value must be a simple
1789 /// l-value.
1790 mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);
1791 mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, QualType ty,
1792 SourceLocation loc, LValueBaseInfo baseInfo);
1793
1794 /// Emit code to compute a designator that specifies the location
1795 /// of the expression.
1796 /// FIXME: document this function better.
1797 LValue emitLValue(const clang::Expr *e);
1798 LValue emitLValueForBitField(LValue base, const FieldDecl *field);
1799 LValue emitLValueForField(LValue base, const clang::FieldDecl *field);
1800
1801 LValue emitLValueForLambdaField(const FieldDecl *field);
1802 LValue emitLValueForLambdaField(const FieldDecl *field,
1803 mlir::Value thisValue);
1804
1805 /// Like emitLValueForField, except that if the Field is a reference, this
1806 /// will return the address of the reference and not the address of the value
1807 /// stored in the reference.
1808 LValue emitLValueForFieldInitialization(LValue base,
1809 const clang::FieldDecl *field,
1810 llvm::StringRef fieldName);
1811
1812 LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
1813
1814 LValue emitMemberExpr(const MemberExpr *e);
1815
1816 /// Emit a musttail call for a thunk with a potentially different ABI.
1817 void emitMustTailThunk(GlobalDecl gd, mlir::Value adjustedThisPtr,
1818 cir::FuncOp callee);
1819
1820 /// Emit a call to an AMDGPU builtin function.
1821 std::optional<mlir::Value> emitAMDGPUBuiltinExpr(unsigned builtinID,
1822 const CallExpr *expr);
1823
1824 LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
1825
1826 LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
1827
1828 /// Given an expression with a pointer type, emit the value and compute our
1829 /// best estimate of the alignment of the pointee.
1830 ///
1831 /// One reasonable way to use this information is when there's a language
1832 /// guarantee that the pointer must be aligned to some stricter value, and
1833 /// we're simply trying to ensure that sufficiently obvious uses of under-
1834 /// aligned objects don't get miscompiled; for example, a placement new
1835 /// into the address of a local variable. In such a case, it's quite
1836 /// reasonable to just ignore the returned alignment when it isn't from an
1837 /// explicit source.
1838 Address emitPointerWithAlignment(const clang::Expr *expr,
1839 LValueBaseInfo *baseInfo = nullptr);
1840
1841 /// Emits a reference binding to the passed in expression.
1842 RValue emitReferenceBindingToExpr(const Expr *e);
1843
1844 mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);
1845
1846 RValue emitRotate(const CallExpr *e, bool isRotateLeft);
1847
1848 mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);
1849
1850 /// Emit a conversion from the specified type to the specified destination
1851 /// type, both of which are CIR scalar types.
1852 mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
1853 clang::QualType dstType,
1854 clang::SourceLocation loc);
1855
1856 void emitScalarInit(const clang::Expr *init, mlir::Location loc,
1857 LValue lvalue, bool capturedByInit = false);
1858
1859 mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx,
1860 const Expr *argExpr);
1861
1862 void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage);
1863
1864 /// Emit a guarded initializer for a static local variable or a static
1865 /// data member of a class template instantiation.
1866 void emitCXXGuardedInit(const VarDecl &varDecl, cir::GlobalOp globalOp,
1867 bool performInit);
1868
1869 void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest,
1870 bool isInit);
1871
1872 void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
1873 clang::QualType ty, LValueBaseInfo baseInfo,
1874 bool isInit = false, bool isNontemporal = false);
1875 void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
1876
1877 /// Store the specified rvalue into the specified
1878 /// lvalue, where both are guaranteed to the have the same type, and that type
1879 /// is 'Ty'.
1880 void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);
1881
1882 mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);
1883
1884 LValue emitStringLiteralLValue(const StringLiteral *e,
1885 llvm::StringRef name = ".str");
1886
1887 mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
1888 mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
1889 bool buildingTopLevelCase);
1890 mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
1891
1892 std::optional<mlir::Value>
1893 emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e,
1894 ReturnValueSlot &returnValue);
1895
1896 /// Given a value and its clang type, returns the value casted to its memory
1897 /// representation.
1898 /// Note: CIR defers most of the special casting to the final lowering passes
1899 /// to conserve the high level information.
1900 mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);
1901
1902 /// EmitFromMemory - Change a scalar value from its memory
1903 /// representation to its value representation.
1904 mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty);
1905
1906 /// Emit a trap instruction, which is used to abort the program in an abnormal
1907 /// way, usually for debugging purposes.
1908 /// \p createNewBlock indicates whether to create a new block for the IR
1909 /// builder. Since the `cir.trap` operation is a terminator, operations that
1910 /// follow a trap cannot be emitted after `cir.trap` in the same block. To
1911 /// ensure these operations get emitted successfully, you need to create a new
1912 /// dummy block and set the insertion point there before continuing from the
1913 /// trap operation.
1914 void emitTrap(mlir::Location loc, bool createNewBlock);
1915
1916 LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
1917
1918 mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
1919
1920 /// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
1921 /// checking is enabled. Otherwise, just emit an unreachable instruction.
1922 /// \p createNewBlock indicates whether to create a new block for the IR
1923 /// builder. Since the `cir.unreachable` operation is a terminator, operations
1924 /// that follow an unreachable point cannot be emitted after `cir.unreachable`
1925 /// in the same block. To ensure these operations get emitted successfully,
1926 /// you need to create a dummy block and set the insertion point there before
1927 /// continuing from the unreachable point.
1928 void emitUnreachable(clang::SourceLocation loc, bool createNewBlock);
1929
1930 /// This method handles emission of any variable declaration
1931 /// inside a function, including static vars etc.
1932 void emitVarDecl(const clang::VarDecl &d);
1933
1934 void emitVariablyModifiedType(QualType ty);
1935
1936 mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
1937
1938 std::optional<mlir::Value> emitX86BuiltinExpr(unsigned builtinID,
1939 const CallExpr *expr);
1940
1941 /// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
1942 /// nonnull, if 1\p LHS is marked _Nonnull.
1943 void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
1944 clang::SourceLocation loc);
1945
1946 /// An object to manage conditionally-evaluated expressions.
1948 CIRGenFunction &cgf;
1949 mlir::OpBuilder::InsertPoint insertPt;
1950
1951 public:
1953 : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {}
1954 ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
1955 : cgf(cgf), insertPt(ip) {}
1956
1958 assert(cgf.outermostConditional != this);
1959 if (!cgf.outermostConditional)
1960 cgf.outermostConditional = this;
1961 }
1962
1964 assert(cgf.outermostConditional != nullptr);
1965 if (cgf.outermostConditional == this)
1966 cgf.outermostConditional = nullptr;
1967 }
1968
1969 /// Returns the insertion point which will be executed prior to each
1970 /// evaluation of the conditional code. In LLVM OG, this method
1971 /// is called getStartingBlock.
1972 mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; }
1973 };
1974
1976 std::optional<LValue> lhs{}, rhs{};
1977 mlir::Value result{};
1978 };
1979
1980 // Return true if we're currently emitting one branch or the other of a
1981 // conditional expression.
1982 bool isInConditionalBranch() const { return outermostConditional != nullptr; }
1983
1984 void setBeforeOutermostConditional(mlir::Value value, Address addr) {
1985 assert(isInConditionalBranch());
1986 {
1987 mlir::OpBuilder::InsertionGuard guard(builder);
1988 builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
1989 builder.createStore(
1990 value.getLoc(), value, addr, /*isVolatile=*/false,
1991 mlir::IntegerAttr::get(
1992 mlir::IntegerType::get(value.getContext(), 64),
1993 (uint64_t)addr.getAlignment().getAsAlign().value()));
1994 }
1995 }
1996
1997 // Points to the outermost active conditional control. This is used so that
1998 // we know if a temporary should be destroyed conditionally.
2000
2001 /// An RAII object to record that we're evaluating a statement
2002 /// expression.
2004 CIRGenFunction &cgf;
2005
2006 /// We have to save the outermost conditional: cleanups in a
2007 /// statement expression aren't conditional just because the
2008 /// StmtExpr is.
2009 ConditionalEvaluation *savedOutermostConditional;
2010
2011 public:
2013 : cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
2014 cgf.outermostConditional = nullptr;
2015 }
2016
2018 cgf.outermostConditional = savedOutermostConditional;
2019 }
2020 };
2021
2022 template <typename FuncTy>
2023 ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
2024 const FuncTy &branchGenFunc);
2025
2026 mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
2027 const clang::Stmt *thenS,
2028 const clang::Stmt *elseS);
2029
2030 /// Build a "reference" to a va_list; this is either the address or the value
2031 /// of the expression, depending on how va_list is defined.
2032 Address emitVAListRef(const Expr *e);
2033
2034 /// Emits the start of a CIR variable-argument operation (`cir.va_start`)
2035 ///
2036 /// \param vaList A reference to the \c va_list as emitted by either
2037 /// \c emitVAListRef or \c emitMSVAListRef.
2038 void emitVAStart(mlir::Value vaList);
2039
2040 /// Emits the end of a CIR variable-argument operation (`cir.va_start`)
2041 ///
2042 /// \param vaList A reference to the \c va_list as emitted by either
2043 /// \c emitVAListRef or \c emitMSVAListRef.
2044 void emitVAEnd(mlir::Value vaList);
2045
2046 /// Generate code to get an argument from the passed in pointer
2047 /// and update it accordingly.
2048 ///
2049 /// \param ve The \c VAArgExpr for which to generate code.
2050 ///
2051 /// \param vaListAddr Receives a reference to the \c va_list as emitted by
2052 /// either \c emitVAListRef or \c emitMSVAListRef.
2053 ///
2054 /// \returns SSA value with the argument.
2055 mlir::Value emitVAArg(VAArgExpr *ve);
2056
2057 /// ----------------------
2058 /// CIR build helpers
2059 /// -----------------
2060public:
2061 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2062 const Twine &name = "tmp",
2063 mlir::Value arraySize = nullptr,
2064 bool insertIntoFnEntryBlock = false);
2065 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2066 const Twine &name = "tmp",
2067 mlir::OpBuilder::InsertPoint ip = {},
2068 mlir::Value arraySize = nullptr);
2069 Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
2070 const Twine &name = "tmp",
2071 mlir::Value arraySize = nullptr,
2072 Address *alloca = nullptr,
2073 mlir::OpBuilder::InsertPoint ip = {});
2074 Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
2075 mlir::Location loc,
2076 const Twine &name = "tmp",
2077 mlir::Value arraySize = nullptr,
2078 mlir::OpBuilder::InsertPoint ip = {});
2079 Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc,
2080 const Twine &name);
2081
2082 /// Create a temporary memory object of the given type, with
2083 /// appropriate alignmen and cast it to the default address space. Returns
2084 /// the original alloca instruction by \p Alloca if it is not nullptr.
2085 Address createMemTemp(QualType t, mlir::Location loc,
2086 const Twine &name = "tmp", Address *alloca = nullptr,
2087 mlir::OpBuilder::InsertPoint ip = {});
2088 Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
2089 const Twine &name = "tmp", Address *alloca = nullptr,
2090 mlir::OpBuilder::InsertPoint ip = {});
2091
2092 mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const {
2093 if (cir::GlobalOp globalOp = v.getDefiningOp<cir::GlobalOp>())
2094 cgm.errorNYI("Global op addrspace cast");
2095 return builder.createAddrSpaceCast(v, destTy);
2096 }
2097
2098 //===--------------------------------------------------------------------===//
2099 // OpenMP Emission
2100 //===--------------------------------------------------------------------===//
2101public:
2102 mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s);
2103 mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s);
2104 mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s);
2105 mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s);
2106 mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s);
2107 mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s);
2108 mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s);
2109 mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s);
2110 mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s);
2111 mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s);
2112 mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s);
2113 mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s);
2114 mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s);
2115 mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s);
2116 mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s);
2117 mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s);
2118 mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s);
2119 mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s);
2120 mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s);
2121 mlir::LogicalResult
2122 emitOMPParallelForDirective(const OMPParallelForDirective &s);
2123 mlir::LogicalResult
2124 emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s);
2125 mlir::LogicalResult
2126 emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s);
2127 mlir::LogicalResult
2128 emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s);
2129 mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s);
2130 mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s);
2131 mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s);
2132 mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s);
2133 mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s);
2134 mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s);
2135 mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s);
2136 mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s);
2137 mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s);
2138 mlir::LogicalResult
2140 mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s);
2141 mlir::LogicalResult
2143 mlir::LogicalResult
2145 mlir::LogicalResult
2147 mlir::LogicalResult
2149 mlir::LogicalResult
2151 mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s);
2152 mlir::LogicalResult
2154 mlir::LogicalResult
2156 mlir::LogicalResult
2158 mlir::LogicalResult
2160 mlir::LogicalResult
2162 mlir::LogicalResult
2164 mlir::LogicalResult
2165 emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s);
2166 mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(
2170 mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(
2174 mlir::LogicalResult
2176 mlir::LogicalResult emitOMPDistributeParallelForDirective(
2180 mlir::LogicalResult
2184 mlir::LogicalResult emitOMPTargetParallelForSimdDirective(
2186 mlir::LogicalResult
2188 mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(
2190 mlir::LogicalResult
2192 mlir::LogicalResult
2194 mlir::LogicalResult
2200 mlir::LogicalResult
2202 mlir::LogicalResult
2204 mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(
2212 mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s);
2213 mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s);
2214 mlir::LogicalResult
2216 mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s);
2217 mlir::LogicalResult
2219 mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s);
2220 mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s);
2221 mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s);
2222
2226 void emitOMPAllocateDecl(const OMPAllocateDecl &d);
2229 void emitOMPRequiresDecl(const OMPRequiresDecl &d);
2230
2231private:
2232 template <typename Op>
2233 void emitOpenMPClauses(Op &op, ArrayRef<const OMPClause *> clauses);
2234
2235 //===--------------------------------------------------------------------===//
2236 // OpenACC Emission
2237 //===--------------------------------------------------------------------===//
2238private:
2239 template <typename Op>
2240 Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind,
2242 // Function to do the basic implementation of an operation with an Associated
2243 // Statement. Models AssociatedStmtConstruct.
2244 template <typename Op, typename TermOp>
2245 mlir::LogicalResult
2246 emitOpenACCOpAssociatedStmt(mlir::Location start, mlir::Location end,
2247 OpenACCDirectiveKind dirKind,
2249 const Stmt *associatedStmt);
2250
2251 template <typename Op, typename TermOp>
2252 mlir::LogicalResult emitOpenACCOpCombinedConstruct(
2253 mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
2254 llvm::ArrayRef<const OpenACCClause *> clauses, const Stmt *loopStmt);
2255
2256 template <typename Op>
2257 void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind,
2259 // The second template argument doesn't need to be a template, since it should
2260 // always be an mlir::acc::LoopOp, but as this is a template anyway, we make
2261 // it a template argument as this way we can avoid including the OpenACC MLIR
2262 // headers here. We will count on linker failures/explicit instantiation to
2263 // ensure we don't mess this up, but it is only called from 1 place, and
2264 // instantiated 3x.
2265 template <typename ComputeOp, typename LoopOp>
2266 void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp,
2267 OpenACCDirectiveKind dirKind,
2269
2270 // The OpenACC LoopOp requires that we have auto, seq, or independent on all
2271 // LoopOp operations for the 'none' device type case. This function checks if
2272 // the LoopOp has one, else it updates it to have one.
2273 void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan,
2275
2276 // The OpenACC 'cache' construct actually applies to the 'loop' if present. So
2277 // keep track of the 'loop' so that we can add the cache vars to it correctly.
2278 mlir::acc::LoopOp *activeLoopOp = nullptr;
2279
2280 struct ActiveOpenACCLoopRAII {
2281 CIRGenFunction &cgf;
2282 mlir::acc::LoopOp *oldLoopOp;
2283
2284 ActiveOpenACCLoopRAII(CIRGenFunction &cgf, mlir::acc::LoopOp *newOp)
2285 : cgf(cgf), oldLoopOp(cgf.activeLoopOp) {
2286 cgf.activeLoopOp = newOp;
2287 }
2288 ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; }
2289 };
2290
2291 // Keep track of the last place we inserted a 'recipe' so that we can insert
2292 // the next one in lexical order.
2293 mlir::OpBuilder::InsertPoint lastRecipeLocation;
2294
2295public:
2296 // Helper type used to store the list of important information for a 'data'
2297 // clause variable, or a 'cache' variable reference.
2299 mlir::Location beginLoc;
2300 mlir::Value varValue;
2301 std::string name;
2302 // The type of the original variable reference: that is, after 'bounds' have
2303 // removed pointers/array types/etc. So in the case of int arr[5], and a
2304 // private(arr[1]), 'origType' is 'int', but 'baseType' is 'int[5]'.
2308 // The list of types that we found when going through the bounds, which we
2309 // can use to properly set the alloca section.
2311 };
2312
2313 // Gets the collection of info required to lower and OpenACC clause or cache
2314 // construct variable reference.
2316 // Helper function to emit the integer expressions as required by an OpenACC
2317 // clause/construct.
2318 mlir::Value emitOpenACCIntExpr(const Expr *intExpr);
2319 // Helper function to emit an integer constant as an mlir int type, used for
2320 // constants in OpenACC constructs/clauses.
2321 mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width,
2322 int64_t value);
2323
2324 mlir::LogicalResult
2326 mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
2327 mlir::LogicalResult
2329 mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
2330 mlir::LogicalResult
2332 mlir::LogicalResult
2334 mlir::LogicalResult
2336 mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
2337 mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
2338 mlir::LogicalResult
2340 mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
2341 mlir::LogicalResult
2343 mlir::LogicalResult
2345 mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);
2346
2349
2350 /// Create a temporary memory object for the given aggregate type.
2351 AggValueSlot createAggTemp(QualType ty, mlir::Location loc,
2352 const Twine &name = "tmp",
2353 Address *alloca = nullptr) {
2355 return AggValueSlot::forAddr(
2356 createMemTemp(ty, loc, name, alloca), ty.getQualifiers(),
2359 }
2360
2361private:
2362 QualType getVarArgType(const Expr *arg);
2363};
2364
2365} // namespace clang::CIRGen
2366
2367#endif
Defines the clang::ASTContext interface.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, Address ptr, Address val1, Address val2, Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size, cir::MemOrder order, cir::SyncScopeKind scope)
Defines the clang::Expr interface and subclasses for C++ expressions.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
Defines an enumeration for C++ overloaded operators.
Enumerates target-specific builtins in their own namespaces within namespace clang.
C Language Family Type Representation.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
This represents 'pragma omp cancel' directive.
This represents 'pragma omp cancellation point' directive.
This represents 'pragma omp dispatch' directive.
This represents 'pragma omp distribute' directive.
This represents 'pragma omp distribute parallel for' composite directive.
This represents 'pragma omp distribute parallel for simd' composite directive.
This represents 'pragma omp distribute simd' composite directive.
This represents 'pragma omp error' directive.
Represents the 'pragma omp fuse' loop transformation directive.
This represents 'pragma omp loop' directive.
Represents the 'pragma omp interchange' loop transformation directive.
This represents 'pragma omp interop' directive.
This represents 'pragma omp masked' directive.
This represents 'pragma omp masked taskloop' directive.
This represents 'pragma omp masked taskloop simd' directive.
This represents 'pragma omp master taskloop' directive.
This represents 'pragma omp master taskloop simd' directive.
This represents 'pragma omp metadirective' directive.
This represents 'pragma omp parallel loop' directive.
This represents 'pragma omp parallel masked taskloop' directive.
This represents 'pragma omp parallel masked taskloop simd' directive.
This represents 'pragma omp parallel master taskloop' directive.
This represents 'pragma omp parallel master taskloop simd' directive.
Represents the 'pragma omp reverse' loop transformation directive.
This represents 'pragma omp scan' directive.
This represents the 'pragma omp stripe' loop transformation directive.
This represents 'pragma omp target data' directive.
This represents 'pragma omp target' directive.
This represents 'pragma omp target enter data' directive.
This represents 'pragma omp target exit data' directive.
This represents 'pragma omp target parallel' directive.
This represents 'pragma omp target parallel for' directive.
This represents 'pragma omp target parallel for simd' directive.
This represents 'pragma omp target parallel loop' directive.
This represents 'pragma omp target simd' directive.
This represents 'pragma omp target teams' directive.
This represents 'pragma omp target teams distribute' combined directive.
This represents 'pragma omp target teams distribute parallel for' combined directive.
This represents 'pragma omp target teams distribute parallel for simd' combined directive.
This represents 'pragma omp target teams distribute simd' combined directive.
This represents 'pragma omp target teams loop' directive.
This represents 'pragma omp target update' directive.
This represents 'pragma omp taskloop' directive.
This represents 'pragma omp taskloop simd' directive.
This represents 'pragma omp teams' directive.
This represents 'pragma omp teams distribute' directive.
This represents 'pragma omp teams distribute parallel for' composite directive.
This represents 'pragma omp teams distribute parallel for simd' composite directive.
This represents 'pragma omp teams distribute simd' combined directive.
This represents 'pragma omp teams loop' directive.
This represents the 'pragma omp tile' loop transformation directive.
This represents the 'pragma omp unroll' loop transformation directive.
This class represents a 'loop' construct. The 'loop' construct applies to a 'for' loop (or range-for ...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3730
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3269
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition Expr.h:6927
Represents an attribute applied to a statement.
Definition Stmt.h:2195
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition Expr.h:4456
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition Expr.h:4494
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition Expr.h:4491
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
BreakStmt - This represents a break.
Definition Stmt.h:3127
mlir::Value getPointer() const
Definition Address.h:96
static Address invalid()
Definition Address.h:74
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Value getBasePointer() const
Definition Address.h:101
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
An abstract representation of regular/ObjC call/message targets.
AbstractCallee(const clang::FunctionDecl *fd)
const clang::ParmVarDecl * getParamDecl(unsigned I) const
CIRGenFPOptionsRAII(CIRGenFunction &cgf, FPOptions FPFeatures)
CXXDefaultInitExprScope(CIRGenFunction &cgf, const CXXDefaultInitExpr *e)
An object to manage conditionally-evaluated expressions.
ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
mlir::OpBuilder::InsertPoint getInsertPoint() const
Returns the insertion point which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forReference(mlir::TypedAttr c)
static ConstantEmission forValue(mlir::TypedAttr c)
LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const
DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr)
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, LValue lvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
RunCleanupsScope(CIRGenFunction &cgf)
Enter a new cleanup scope.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void restore()
Can be used to restore the state early, before the dtor is run.
SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value)
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
static bool hasScalarEvaluationKind(clang::QualType type)
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
void emitOpenACCRoutine(const OpenACCRoutineDecl &d)
void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d, cir::GlobalOp gv, cir::GetGlobalOp gvAddr)
Add the initializer for 'd' to the global variable that has already been created for it.
mlir::LogicalResult emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &s)
void generateThunk(cir::FuncOp fn, const CIRGenFunctionInfo &fnInfo, GlobalDecl gd, const ThunkInfo &thunk, bool isUnprototyped)
Generate code for a thunk function.
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard SourceLocExprScopeGuard
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d, mlir::OpBuilder::InsertPoint ip={})
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, clang::QualType::DestructionKind dtorKind)
Enter a destroy cleanup for the given local variable.
ImplicitParamDecl * cxxabiThisDecl
CXXThisDecl - When generating code for a C++ member function, this will hold the implicit 'this' decl...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
bool curFuncIsThunk
In C++, whether we are code generating a thunk.
mlir::LogicalResult emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
mlir::LogicalResult emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &s)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
llvm::ScopedHashTable< const clang::Decl *, mlir::Value > SymTableTy
The symbol table maps a variable name to a value in the current scope.
void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc)
Definition CIRGenCXX.cpp:33
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s)
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator, CallArgList &callArgs)
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
void emitOMPRequiresDecl(const OMPRequiresDecl &d)
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::LogicalResult emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s)
Address cxxDefaultInitExprThis
The value of 'this' to sue when evaluating CXXDefaultInitExprs within this expression.
void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &s)
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s)
void setBeforeOutermostConditional(mlir::Value value, Address addr)
mlir::LogicalResult emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s)
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::Value loadCXXThis()
Load the value for 'this'.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-vlaue withi the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
llvm::DenseMap< const clang::Decl *, Address > DeclMapTy
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void addCatchHandlerAttr(const CXXCatchStmt *catchStmt, SmallVector< mlir::Attribute > &handlerAttrs)
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
const clang::Decl * curFuncDecl
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
LValue emitLValueForLambdaField(const FieldDecl *field)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOrTryCall=nullptr)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
bool isTrivialInitializer(const Expr *init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
void emitOpenACCDeclare(const OpenACCDeclareDecl &d)
Address getAddrOfLocalVar(const clang::VarDecl *vd)
Return the address of a local variable.
void emitAnyExprToExn(const Expr *e, Address addr)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
void emitMustTailThunk(GlobalDecl gd, mlir::Value adjustedThisPtr, cir::FuncOp callee)
Emit a musttail call for a thunk with a potentially different ABI.
Address getAsNaturalAddressOf(Address addr, QualType pointeeTy)
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s)
mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase, bool delegating)
Return the VTT parameter that should be passed to a base constructor/destructor with virtual bases.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s)
void initializeVTablePointers(mlir::Location loc, const clang::CXXRecordDecl *rd)
mlir::Type convertType(const TypeDecl *t)
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
void initializeVTablePointer(mlir::Location loc, const VPtr &vptr)
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d)
void emitAggregateStore(mlir::Value value, Address dest)
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
ConditionalEvaluation * outermostConditional
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &s)
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl, CXXCtorInitializer *baseInit)
RValue emitAtomicExpr(AtomicExpr *e)
void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d, LValue lvalue, bool capturedByInit=false)
Emit an expression as an initializer for an object (variable, field, etc.) at the given location.
void emitCXXGuardedInit(const VarDecl &varDecl, cir::GlobalOp globalOp, bool performInit)
Emit a guarded initializer for a static local variable or a static data member of a class template in...
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s)
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
mlir::LogicalResult emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s)
VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass)
const TargetCIRGenInfo & getTargetHooks() const
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s)
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
RValue emitCoyieldExpr(const CoyieldExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s)
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e)
mlir::LogicalResult emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s)
CleanupKind getCleanupKind(QualType::DestructionKind kind)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s)
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
std::pair< mlir::Value, mlir::Type > emitAsmInputLValue(const TargetInfo::ConstraintInfo &info, LValue inputValue, QualType inputType, std::string &constraintString, SourceLocation loc)
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::LogicalResult emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s)
void emitOMPAllocateDecl(const OMPAllocateDecl &d)
mlir::LogicalResult emitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &s)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType)
ImplicitParamDecl * cxxStructorImplicitParamDecl
When generating code for a constructor or destructor, this will hold the implicit argument (e....
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d)
mlir::LogicalResult emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType)
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
void startThunk(cir::FuncOp fn, GlobalDecl gd, const CIRGenFunctionInfo &fnInfo, bool isUnprototyped)
Start generating a thunk function.
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s)
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s)
mlir::LogicalResult emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s)
mlir::LogicalResult emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
static int64_t getZExtIntValueFromConstOp(mlir::Value val)
Get zero-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s)
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &s)
mlir::LogicalResult emitOMPInterchangeDirective(const OMPInterchangeDirective &s)
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s)
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s)
LValue emitAggExprToLValue(const Expr *e)
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
clang::CurrentSourceLocExprScope curSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
mlir::Value loadCXXVTT()
Load the VTT parameter to base constructors/destructors that have virtual bases.
void emitVarDecl(const clang::VarDecl &d)
This method handles emission of any variable declaration inside a function, including static vars etc...
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value emitSVEPredicateCast(mlir::Value pred, unsigned minNumElts, mlir::Location loc)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr, SmallVectorImpl< mlir::Value > &ops, clang::SVETypeFlags typeFlags)
Address returnValue
The temporary alloca to hold the return value.
LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo)
void emitCallAndReturnForThunk(cir::FuncOp callee, const ThunkInfo *thunk, bool isUnprototyped)
Emit the call and return for a thunk function.
static int64_t getSExtIntValueFromConstOp(mlir::Value val)
Get sign-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPTargetDataDirective(const OMPTargetDataDirective &s)
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &s)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr, const clang::CXXRecordDecl *vtableClass)
Return the Value of the vtable pointer member pointed to by thisAddr.
void emitArrayDestroy(mlir::Value begin, mlir::Value numElements, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Destroys all the elements of the given array, beginning from last to first.
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::LogicalResult emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s)
RValue emitAnyExprToTemp(const clang::Expr *e)
Similarly to emitAnyExpr(), however, the result will always be accessible even if no aggregate locati...
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s)
mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS)
void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
llvm::SmallPtrSet< const clang::CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
bool hasVolatileMember(QualType t)
returns true if aggregate type has a volatile member.
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s)
void emitCallArg(CallArgList &args, const clang::Expr *e, clang::QualType argType)
clang::FieldDecl * lambdaThisCaptureField
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s)
void emitConstructorBody(FunctionArgList &args)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &s)
LValue emitCallExprLValue(const clang::CallExpr *e)
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s)
bool haveInsertPoint() const
True if an insertion point is defined.
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::LogicalResult emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s)
void emitAutoVarInit(const AutoVarEmission &emission)
Emit the initializer for an allocated variable.
void emitInitializerForField(clang::FieldDecl *field, LValue lhs, clang::Expr *init)
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
std::optional< mlir::Value > emitAMDGPUBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an AMDGPU builtin function.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::LogicalResult emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void maybeEmitDeferredVarDeclInit(const VarDecl *vd)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_end)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
std::optional< mlir::Value > emitAArch64SMEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
void pushStackRestore(CleanupKind kind, Address spMem)
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s)
void emitAutoVarDecl(const clang::VarDecl &d)
Emit code and set up symbol table for a variable declaration with auto, register, or no storage class...
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s)
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s)
mlir::LogicalResult emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s)
CIRGenBuilderTy & getBuilder()
void emitVAStart(mlir::Value vaList)
Emits the start of a CIR variable-argument operation (cir.va_start)
bool didCallStackSave
Whether a cir.stacksave operation has been added.
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
mlir::Value emitOpenACCIntExpr(const Expr *intExpr)
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitDestroy(Address addr, QualType type, Destroyer *destroyer)
Immediately perform the destruction of the given object.
void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc, AbstractCallee ac, unsigned paramNum)
Create a check for a function parameter that may potentially be declared as non-null.
const CIRGenModule & getCIRGenModule() const
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
llvm::DenseMap< const VarDecl *, mlir::Value > nrvoFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
Destroyer * getDestroyer(clang::QualType::DestructionKind kind)
mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee, llvm::ArrayRef< mlir::Value > args={}, mlir::NamedAttrList attrs={})
void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty)
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
void emitAtomicInit(Expr *init, LValue dest)
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
mlir::LogicalResult emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width, int64_t value)
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d)
void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::LogicalResult emitOMPParallelForDirective(const OMPParallelForDirective &s)
RValue emitCoawaitExpr(const CoawaitExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
void emitCXXThrowExpr(const CXXThrowExpr *e)
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
mlir::LogicalResult emitOMPDistributeDirective(const OMPDistributeDirective &s)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &s)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &s)
llvm::SmallVector< VPtr, 4 > VPtrsVector
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
bool sawAsmBlock
Whether or not a Microsoft-style asm block has been processed within this function.
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s)
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
std::pair< mlir::Value, mlir::Type > emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr, std::string &constraintString)
EHScopeStack::stable_iterator currentCleanupStackDepth
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
void pushFullExprCleanup(CleanupKind kind, As... a)
Push a cleanup to be run at the end of the current full-expression.
void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param, clang::SourceLocation loc)
We are performing a delegate call; that is, the current function is delegating to another one.
mlir::LogicalResult emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
const CIRGenFunctionInfo * curFnInfo
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if LHS is marked _Nonnull...
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
std::optional< mlir::Value > emitAArch64SVEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr)
void emitOMPCapturedExpr(const OMPCapturedExprDecl &d)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &s)
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s)
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates an alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &s)
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s)
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor, const FunctionArgList &args)
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, mlir::OpBuilder::InsertPoint ip, mlir::Value arraySize=nullptr)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s)
void finishThunk()
Finish generating a thunk function.
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
void emitAutoVarCleanups(const AutoVarEmission &emission)
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:48
A saved depth on the scope stack.
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:193
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a call to a CUDA kernel function.
Definition ExprCXX.h:235
CXXCatchStmt - This represents a C++ catch block.
Definition StmtCXX.h:28
Represents a call to a C++ constructor.
Definition ExprCXX.h:1549
Represents a C++ constructor within a class.
Definition DeclCXX.h:2611
Represents a C++ base or member initializer.
Definition DeclCXX.h:2376
A default argument (C++ [dcl.fct.default]).
Definition ExprCXX.h:1271
A use of a default initializer in a constructor or in aggregate initialization.
Definition ExprCXX.h:1378
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2627
Represents a C++ destructor within a class.
Definition DeclCXX.h:2876
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:482
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:180
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2356
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:85
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2746
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
Represents a C++ temporary.
Definition ExprCXX.h:1460
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1209
CXXTryStmt - A C++ try block, including all handlers.
Definition StmtCXX.h:69
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
CaseStmt - Represent a case statement.
Definition Stmt.h:1912
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4303
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1732
ContinueStmt - This represents a continue.
Definition Stmt.h:3111
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition StmtCXX.h:473
Represents the body of a coroutine.
Definition StmtCXX.h:320
SourceLocExprScopeGuard(const Expr *DefaultExpr, CurrentSourceLocExprScope &Current)
Represents the current source location and context used to determine the value of the source location...
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1623
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2824
This represents one expression.
Definition Expr.h:112
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6609
Represents a member of a struct/union/class.
Definition Decl.h:3160
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2880
Represents a function declaration or definition.
Definition Decl.h:2000
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5315
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2961
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3000
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3661
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
This represents 'pragma omp allocate ...' directive.
Definition DeclOpenMP.h:536
Pseudo declaration for capturing expressions.
Definition DeclOpenMP.h:445
This represents 'pragma omp declare mapper ...' directive.
Definition DeclOpenMP.h:349
This represents 'pragma omp declare reduction ...' directive.
Definition DeclOpenMP.h:239
This represents 'pragma omp groupprivate ...' directive.
Definition DeclOpenMP.h:173
This represents 'pragma omp requires...' directive.
Definition DeclOpenMP.h:479
This represents 'pragma omp threadprivate ...' directive.
Definition DeclOpenMP.h:110
ObjCMethodDecl - Represents an instance or class method declaration.
Definition DeclObjC.h:140
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
Represents a parameter to a function.
Definition Decl.h:1790
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
A (possibly-)qualified type.
Definition TypeBase.h:937
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8428
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
Flags to identify the types for overloaded SVE builtins.
Encodes a location in the source.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Exposes information about the current target.
Definition TargetInfo.h:226
Represents a declaration of a type.
Definition Decl.h:3513
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3974
#define bool
Definition gpuintrin.h:32
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
const internal::VariadicDynCastAllOfMatcher< Decl, VarDecl > varDecl
Matches variable declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, CallExpr > callExpr
Matches call expressions.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicAllOfMatcher< Stmt > stmt
Matches statements.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
OpenACCDirectiveKind
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
OpenACCComputeConstruct(OpenACCDirectiveKind K, SourceLocation Start, SourceLocation DirectiveLoc, SourceLocation End, ArrayRef< const OpenACCClause * > Clauses, Stmt *StructuredBlock)
CXXDtorType
C++ destructor types.
Definition ABI.h:34
#define true
Definition stdbool.h:25
static bool aggValueSlot()
static bool peepholeProtection()
static bool opAllocaEscapeByReference()
static bool generateDebugInfo()
AutoVarEmission(const clang::VarDecl &variable)
bool isEscapingByRef
True if the variable is a __block variable that is captured by an escaping block.
Address addr
The address of the alloca for languages with explicit address space (e.g.
bool emittedAsOffload
True if the variable was emitted as an offload recipe, and thus doesn't have the same sort of alloca ...
bool isConstantAggregate
True if the variable is of aggregate type and has a constant initializer.
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
Address getObjectAddress(CIRGenFunction &cgf) const
Returns the address of the object within this declaration.
std::unique_ptr< CGCoroData > data
CXXDefaultArgExprScope(CIRGenFunction &cfg, const CXXDefaultArgExpr *e)
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
mlir::Block * createCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc)
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
void updateRetLoc(mlir::Block *b, mlir::Location loc)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateCleanupBlock(mlir::OpBuilder &builder)
mlir::Location getRetLoc(mlir::Block *b)
llvm::PointerUnion< const clang::FunctionProtoType *, const clang::ObjCMethodDecl * > p
PrototypeWrapper(const clang::ObjCMethodDecl *md)
PrototypeWrapper(const clang::FunctionProtoType *ft)
const clang::CXXRecordDecl * vtableClass
const clang::CXXRecordDecl * nearestVBase
VlaSizePair(mlir::Value num, QualType ty)
The this pointer adjustment as well as an optional return adjustment for a thunk.
Definition Thunk.h:157