clang 23.0.0git
CIRGenFunction.h
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
14#define CLANG_LIB_CIR_CODEGEN_CIRGENFUNCTION_H
15
16#include "CIRGenBuilder.h"
17#include "CIRGenCall.h"
18#include "CIRGenModule.h"
19#include "CIRGenTypeCache.h"
20#include "CIRGenValue.h"
21#include "EHScopeStack.h"
22
23#include "Address.h"
24
27#include "clang/AST/CharUnits.h"
29#include "clang/AST/Decl.h"
30#include "clang/AST/ExprCXX.h"
31#include "clang/AST/Stmt.h"
32#include "clang/AST/Type.h"
38#include "llvm/ADT/ScopedHashTable.h"
39#include "llvm/IR/Instructions.h"
40
41namespace {
42class ScalarExprEmitter;
43} // namespace
44
45namespace mlir {
46namespace acc {
47class LoopOp;
48} // namespace acc
49} // namespace mlir
50
51namespace clang::CIRGen {
52
53struct CGCoroData;
54
56public:
58
59private:
60 friend class ::ScalarExprEmitter;
61 /// The builder is a helper class to create IR inside a function. The
62 /// builder is stateful, in particular it keeps an "insertion point": this
63 /// is where the next operations will be introduced.
64 CIRGenBuilderTy &builder;
65
66public:
67 /// The GlobalDecl for the current function being compiled or the global
68 /// variable currently being initialized.
70
72
73 /// The compiler-generated variable that holds the return value.
74 std::optional<mlir::Value> fnRetAlloca;
75
76 // Holds coroutine data if the current function is a coroutine. We use a
77 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
78 // in this header.
79 struct CGCoroInfo {
80 std::unique_ptr<CGCoroData> data;
81 CGCoroInfo();
83 };
85
86 bool isCoroutine() const { return curCoro.data != nullptr; }
87
88 /// The temporary alloca to hold the return value. This is
89 /// invalid iff the function has no return value.
91
92 /// Tracks function scope overall cleanup handling.
94
96
97 /// A mapping from NRVO variables to the flags used to indicate
98 /// when the NRVO has been applied to this variable.
99 llvm::DenseMap<const VarDecl *, mlir::Value> nrvoFlags;
100
101 llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
104
105 /// CXXThisDecl - When generating code for a C++ member function,
106 /// this will hold the implicit 'this' declaration.
108 mlir::Value cxxabiThisValue = nullptr;
109 mlir::Value cxxThisValue = nullptr;
111
112 /// When generating code for a constructor or destructor, this will hold the
113 /// implicit argument (e.g. VTT).
116
117 /// The value of 'this' to sue when evaluating CXXDefaultInitExprs within this
118 /// expression.
120
121 // Holds the Decl for the current outermost non-closure context
122 const clang::Decl *curFuncDecl = nullptr;
123 /// This is the inner-most code context, which includes blocks.
124 const clang::Decl *curCodeDecl = nullptr;
126
127 /// The current function or global initializer that is generated code for.
128 /// This is usually a cir::FuncOp, but it can also be a cir::GlobalOp for
129 /// global initializers.
130 mlir::Operation *curFn = nullptr;
131
132 /// Save Parameter Decl for coroutine.
134
135 using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
136 /// This keeps track of the CIR allocas or globals for local C
137 /// declarations.
139
140 /// The type of the condition for the emitting switch statement.
142
143 clang::ASTContext &getContext() const { return cgm.getASTContext(); }
144
145 CIRGenBuilderTy &getBuilder() { return builder; }
146
148 const CIRGenModule &getCIRGenModule() const { return cgm; }
149
151 // We currently assume this isn't called for a global initializer.
152 auto fn = mlir::cast<cir::FuncOp>(curFn);
153 return &fn.getRegion().front();
154 }
155
156 /// Sanitizers enabled for this function.
158
160 public:
164
165 private:
166 void ConstructorHelper(clang::FPOptions FPFeatures);
167 CIRGenFunction &cgf;
168 clang::FPOptions oldFPFeatures;
169 llvm::fp::ExceptionBehavior oldExcept;
170 llvm::RoundingMode oldRounding;
171 };
173
174 /// The symbol table maps a variable name to a value in the current scope.
175 /// Entering a function creates a new scope, and the function arguments are
176 /// added to the mapping. When the processing of a function is terminated,
177 /// the scope is destroyed and the mappings created in this scope are
178 /// dropped.
179 using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
181
182 /// Whether a cir.stacksave operation has been added. Used to avoid
183 /// inserting cir.stacksave for multiple VLAs in the same scope.
184 bool didCallStackSave = false;
185
186 /// Whether or not a Microsoft-style asm block has been processed within
187 /// this fuction. These can potentially set the return value.
188 bool sawAsmBlock = false;
189
190 /// In C++, whether we are code generating a thunk. This controls whether we
191 /// should emit cleanups.
192 bool curFuncIsThunk = false;
193
194 mlir::Type convertTypeForMem(QualType t);
195
196 mlir::Type convertType(clang::QualType t);
197 mlir::Type convertType(const TypeDecl *t) {
198 return convertType(getContext().getTypeDeclType(t));
199 }
200
201 /// Get integer from a mlir::Value that is an int constant or a constant op.
202 static int64_t getSExtIntValueFromConstOp(mlir::Value val) {
203 auto constOp = val.getDefiningOp<cir::ConstantOp>();
204 assert(constOp && "getSExtIntValueFromConstOp call with non ConstantOp");
205 return constOp.getIntValue().getSExtValue();
206 }
207
208 /// Get zero-extended integer from a mlir::Value that is an int constant or a
209 /// constant op.
210 static int64_t getZExtIntValueFromConstOp(mlir::Value val) {
211 auto constOp = val.getDefiningOp<cir::ConstantOp>();
212 assert(constOp && "getZExtIntValueFromConstOp call with non ConstantOp");
213 return constOp.getIntValue().getZExtValue();
214 }
215
216 /// Return the cir::TypeEvaluationKind of QualType \c type.
218
222
226
228 bool suppressNewContext = false);
230
231 CIRGenTypes &getTypes() const { return cgm.getTypes(); }
232
233 const TargetInfo &getTarget() const { return cgm.getTarget(); }
234 mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }
235
237 return cgm.getTargetCIRGenInfo();
238 }
239
240 // ---------------------
241 // Opaque value handling
242 // ---------------------
243
244 /// Keeps track of the current set of opaque value expressions.
245 llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
246 llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
247
248 // This keeps track of the associated size for each VLA type.
249 // We track this by the size expression rather than the type itself because
250 // in certain situations, like a const qualifier applied to an VLA typedef,
251 // multiple VLA types can share the same size expression.
252 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
253 // enter/leave scopes.
254 llvm::DenseMap<const Expr *, mlir::Value> vlaSizeMap;
255
256public:
257 /// A non-RAII class containing all the information about a bound
258 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
259 /// this which makes individual mappings very simple; using this
260 /// class directly is useful when you have a variable number of
261 /// opaque values or don't want the RAII functionality for some
262 /// reason.
263 class OpaqueValueMappingData {
264 const OpaqueValueExpr *opaqueValue;
265 bool boundLValue;
266
267 OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
268 : opaqueValue(ov), boundLValue(boundLValue) {}
269
270 public:
271 OpaqueValueMappingData() : opaqueValue(nullptr) {}
272
273 static bool shouldBindAsLValue(const Expr *expr) {
274 // gl-values should be bound as l-values for obvious reasons.
275 // Records should be bound as l-values because IR generation
276 // always keeps them in memory. Expressions of function type
277 // act exactly like l-values but are formally required to be
278 // r-values in C.
279 return expr->isGLValue() || expr->getType()->isFunctionType() ||
281 }
282
284 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
285 if (shouldBindAsLValue(ov))
286 return bind(cgf, ov, cgf.emitLValue(e));
287 return bind(cgf, ov, cgf.emitAnyExpr(e));
288 }
289
291 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
292 assert(shouldBindAsLValue(ov));
293 cgf.opaqueLValues.insert(std::make_pair(ov, lv));
294 return OpaqueValueMappingData(ov, true);
295 }
296
298 bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
299 assert(!shouldBindAsLValue(ov));
300 cgf.opaqueRValues.insert(std::make_pair(ov, rv));
301
302 OpaqueValueMappingData data(ov, false);
303
304 // Work around an extremely aggressive peephole optimization in
305 // EmitScalarConversion which assumes that all other uses of a
306 // value are extant.
308 return data;
309 }
310
311 bool isValid() const { return opaqueValue != nullptr; }
312 void clear() { opaqueValue = nullptr; }
313
315 assert(opaqueValue && "no data to unbind!");
316
317 if (boundLValue) {
318 cgf.opaqueLValues.erase(opaqueValue);
319 } else {
320 cgf.opaqueRValues.erase(opaqueValue);
322 }
323 }
324 };
325
326 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
328 CIRGenFunction &cgf;
330
331 public:
335
336 /// Build the opaque value mapping for the given conditional
337 /// operator if it's the GNU ?: extension. This is a common
338 /// enough pattern that the convenience operator is really
339 /// helpful.
340 ///
343 : cgf(cgf) {
344 if (mlir::isa<ConditionalOperator>(op))
345 // Leave Data empty.
346 return;
347
349 mlir::cast<BinaryConditionalOperator>(op);
351 e->getCommon());
352 }
353
354 /// Build the opaque value mapping for an OpaqueValueExpr whose source
355 /// expression is set to the expression the OVE represents.
357 : cgf(cgf) {
358 if (ov) {
359 assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
360 "for OVE with no source expression");
361 data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
362 }
363 }
364
366 LValue lvalue)
367 : cgf(cgf),
368 data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}
369
371 RValue rvalue)
372 : cgf(cgf),
373 data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}
374
375 void pop() {
376 data.unbind(cgf);
377 data.clear();
378 }
379
381 if (data.isValid())
382 data.unbind(cgf);
383 }
384 };
385
386private:
387 /// Declare a variable in the current scope, return success if the variable
388 /// wasn't declared yet.
389 void declare(mlir::Value addrVal, const clang::Decl *var, clang::QualType ty,
390 mlir::Location loc, clang::CharUnits alignment,
391 bool isParam = false);
392
393public:
394 mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt);
395
396 void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty);
397
398private:
399 // Track current variable initialization (if there's one)
400 const clang::VarDecl *currVarDecl = nullptr;
401 class VarDeclContext {
403 const clang::VarDecl *oldVal = nullptr;
404
405 public:
406 VarDeclContext(CIRGenFunction &p, const VarDecl *value) : p(p) {
407 if (p.currVarDecl)
408 oldVal = p.currVarDecl;
409 p.currVarDecl = value;
410 }
411
412 /// Can be used to restore the state early, before the dtor
413 /// is run.
414 void restore() { p.currVarDecl = oldVal; }
415 ~VarDeclContext() { restore(); }
416 };
417
418public:
419 /// Use to track source locations across nested visitor traversals.
420 /// Always use a `SourceLocRAIIObject` to change currSrcLoc.
421 std::optional<mlir::Location> currSrcLoc;
423 CIRGenFunction &cgf;
424 std::optional<mlir::Location> oldLoc;
425
426 public:
427 SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value) : cgf(cgf) {
428 if (cgf.currSrcLoc)
429 oldLoc = cgf.currSrcLoc;
430 cgf.currSrcLoc = value;
431 }
432
433 /// Can be used to restore the state early, before the dtor
434 /// is run.
435 void restore() { cgf.currSrcLoc = oldLoc; }
437 };
438
440 llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
441
442 /// Hold counters for incrementally naming temporaries
443 unsigned counterRefTmp = 0;
444 unsigned counterAggTmp = 0;
445 std::string getCounterRefTmpAsString();
446 std::string getCounterAggTmpAsString();
447
448 /// Helpers to convert Clang's SourceLocation to a MLIR Location.
449 mlir::Location getLoc(clang::SourceLocation srcLoc);
450 mlir::Location getLoc(clang::SourceRange srcLoc);
451 mlir::Location getLoc(mlir::Location lhs, mlir::Location rhs);
452
453 const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
454
455 /// True if an insertion point is defined. If not, this indicates that the
456 /// current code being emitted is unreachable.
457 /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
458 /// since we don't yet force null insertion point to designate behavior (like
459 /// LLVM's codegen does) and we probably shouldn't.
460 bool haveInsertPoint() const {
461 return builder.getInsertionBlock() != nullptr;
462 }
463
464 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
465 // an ObjCMethodDecl.
467 llvm::PointerUnion<const clang::FunctionProtoType *,
468 const clang::ObjCMethodDecl *>
470
473 };
474
476
477 /// An abstract representation of regular/ObjC call/message targets.
479 /// The function declaration of the callee.
480 [[maybe_unused]] const clang::Decl *calleeDecl;
481
482 public:
483 AbstractCallee() : calleeDecl(nullptr) {}
484 AbstractCallee(const clang::FunctionDecl *fd) : calleeDecl(fd) {}
485
486 bool hasFunctionDecl() const {
487 return llvm::isa_and_nonnull<clang::FunctionDecl>(calleeDecl);
488 }
489
490 unsigned getNumParams() const {
491 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
492 return fd->getNumParams();
493 return llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_size();
494 }
495
496 const clang::ParmVarDecl *getParamDecl(unsigned I) const {
497 if (const auto *fd = llvm::dyn_cast<clang::FunctionDecl>(calleeDecl))
498 return fd->getParamDecl(I);
499 return *(llvm::cast<clang::ObjCMethodDecl>(calleeDecl)->param_begin() +
500 I);
501 }
502 };
503
504 struct VlaSizePair {
505 mlir::Value numElts;
507
508 VlaSizePair(mlir::Value num, QualType ty) : numElts(num), type(ty) {}
509 };
510
511 /// Return the number of elements for a single dimension
512 /// for the given array type.
513 VlaSizePair getVLAElements1D(const VariableArrayType *vla);
514
515 /// Returns an MLIR::Value+QualType pair that corresponds to the size,
516 /// in non-variably-sized elements, of a variable length array type,
517 /// plus that largest non-variably-sized element type. Assumes that
518 /// the type has already been emitted with emitVariablyModifiedType.
519 VlaSizePair getVLASize(const VariableArrayType *type);
520 VlaSizePair getVLASize(QualType type);
521
523
524 mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType) {
525 return getAsNaturalAddressOf(addr, pointeeType).getBasePointer();
526 }
527
528 void finishFunction(SourceLocation endLoc);
529
530 /// Determine whether the given initializer is trivial in the sense
531 /// that it requires no code to be generated.
532 bool isTrivialInitializer(const Expr *init);
533
534 /// If the specified expression does not fold to a constant, or if it does but
535 /// contains a label, return false. If it constant folds return true and set
536 /// the boolean result in Result.
537 bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool,
538 bool allowLabels = false);
540 llvm::APSInt &resultInt,
541 bool allowLabels = false);
542
543 /// Return true if the statement contains a label in it. If
544 /// this statement is not executed normally, it not containing a label means
545 /// that we can just remove the code.
546 bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts = false);
547
548 Address emitExtVectorElementLValue(LValue lv, mlir::Location loc);
549
550 class ConstantEmission {
551 // Cannot use mlir::TypedAttr directly here because of bit availability.
552 llvm::PointerIntPair<mlir::Attribute, 1, bool> valueAndIsReference;
553 ConstantEmission(mlir::TypedAttr c, bool isReference)
554 : valueAndIsReference(c, isReference) {}
555
556 public:
558 static ConstantEmission forReference(mlir::TypedAttr c) {
559 return ConstantEmission(c, true);
560 }
561 static ConstantEmission forValue(mlir::TypedAttr c) {
562 return ConstantEmission(c, false);
563 }
564
565 explicit operator bool() const {
566 return valueAndIsReference.getOpaqueValue() != nullptr;
567 }
568
569 bool isReference() const { return valueAndIsReference.getInt(); }
571 assert(isReference());
572 cgf.cgm.errorNYI(refExpr->getSourceRange(),
573 "ConstantEmission::getReferenceLValue");
574 return {};
575 }
576
577 mlir::TypedAttr getValue() const {
578 assert(!isReference());
579 return mlir::cast<mlir::TypedAttr>(valueAndIsReference.getPointer());
580 }
581 };
582
583 ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
584 ConstantEmission tryEmitAsConstant(const MemberExpr *me);
585
588 /// The address of the alloca for languages with explicit address space
589 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
590 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
591 /// as a global constant.
593
594 /// True if the variable is of aggregate type and has a constant
595 /// initializer.
597
598 /// True if the variable is a __block variable that is captured by an
599 /// escaping block.
600 bool isEscapingByRef = false;
601
602 /// True if the variable was emitted as an offload recipe, and thus doesn't
603 /// have the same sort of alloca initialization.
604 bool emittedAsOffload = false;
605
606 mlir::Value nrvoFlag{};
607
608 struct Invalid {};
610
613
615
616 bool wasEmittedAsGlobal() const { return !addr.isValid(); }
617
619
620 /// Returns the raw, allocated address, which is not necessarily
621 /// the address of the object itself. It is casted to default
622 /// address space for address space agnostic languages.
623 Address getAllocatedAddress() const { return addr; }
624
625 // Changes the stored address for the emission. This function should only
626 // be used in extreme cases, and isn't required to model normal AST
627 // initialization/variables.
629
630 /// Returns the address of the object within this declaration.
631 /// Note that this does not chase the forwarding pointer for
632 /// __block decls.
634 if (!isEscapingByRef)
635 return addr;
636
638 return Address::invalid();
639 }
640 };
641
642 /// IndirectBranch - The first time an indirect goto is seen we create a block
643 /// reserved for the indirect branch. Unlike before,the actual 'indirectbr'
644 /// is emitted at the end of the function, once all block destinations have
645 /// been resolved.
646 mlir::Block *indirectGotoBlock = nullptr;
647
650
651 /// Perform the usual unary conversions on the specified expression and
652 /// compare the result against zero, returning an Int1Ty value.
653 mlir::Value evaluateExprAsBool(const clang::Expr *e);
654
655 cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d,
656 cir::GlobalOp gv,
657 cir::GetGlobalOp gvAddr);
658
659 /// Enter the cleanups necessary to complete the given phase of destruction
660 /// for a destructor. The end result should call destructors on members and
661 /// base classes in reverse order of their construction.
663
664 /// Determines whether an EH cleanup is required to destroy a type
665 /// with the given destruction kind.
666 /// TODO(cir): could be shared with Clang LLVM codegen
668 switch (kind) {
670 return false;
674 return getLangOpts().Exceptions;
676 return getLangOpts().Exceptions &&
677 cgm.getCodeGenOpts().ObjCAutoRefCountExceptions;
678 }
679 llvm_unreachable("bad destruction kind");
680 }
681
685
687
688 /// Set the address of a local variable.
690 assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
691 localDeclMap.insert({vd, addr});
692
693 // Add to the symbol table if not there already.
694 if (symbolTable.count(vd))
695 return;
696 symbolTable.insert(vd, addr.getPointer());
697 }
698
699 // Replaces the address of the local variable, if it exists. Else does the
700 // same thing as setAddrOfLocalVar.
702 localDeclMap.insert_or_assign(vd, addr);
703 }
704
705 // A class to allow reverting changes to a var-decl's registration to the
706 // localDeclMap. This is used in cases where things are being inserted into
707 // the variable list but don't follow normal lookup/search rules, like in
708 // OpenACC recipe generation.
710 CIRGenFunction &cgf;
711 const VarDecl *vd;
712 bool shouldDelete = false;
713 Address oldAddr = Address::invalid();
714
715 public:
717 : cgf(cgf), vd(vd) {
718 auto mapItr = cgf.localDeclMap.find(vd);
719
720 if (mapItr != cgf.localDeclMap.end())
721 oldAddr = mapItr->second;
722 else
723 shouldDelete = true;
724 }
725
727 if (shouldDelete)
728 cgf.localDeclMap.erase(vd);
729 else
730 cgf.localDeclMap.insert_or_assign(vd, oldAddr);
731 }
732 };
733
735
738
739 static bool
741
748
751
755 const clang::CXXRecordDecl *nearestVBase,
756 clang::CharUnits offsetFromNearestVBase,
757 bool baseIsNonVirtualPrimaryBase,
758 const clang::CXXRecordDecl *vtableClass,
759 VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
760 /// Return the Value of the vtable pointer member pointed to by thisAddr.
761 mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
762 const clang::CXXRecordDecl *vtableClass);
763
764 /// Returns whether we should perform a type checked load when loading a
765 /// virtual function for virtual calls to members of RD. This is generally
766 /// true when both vcall CFI and whole-program-vtables are enabled.
768
769 /// Source location information about the default argument or member
770 /// initializer expression we're evaluating, if any.
774
775 /// A scope within which we are constructing the fields of an object which
776 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
777 /// we need to evaluate the CXXDefaultInitExpr within the evaluation.
779 public:
781 : cgf(cgf), oldCXXDefaultInitExprThis(cgf.cxxDefaultInitExprThis) {
782 cgf.cxxDefaultInitExprThis = thisAddr;
783 }
785 cgf.cxxDefaultInitExprThis = oldCXXDefaultInitExprThis;
786 }
787
788 private:
789 CIRGenFunction &cgf;
790 Address oldCXXDefaultInitExprThis;
791 };
792
793 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
794 /// is overridden to be the object under construction.
796 public:
801 cgf.cxxThisValue = cgf.cxxDefaultInitExprThis.getPointer();
802 cgf.cxxThisAlignment = cgf.cxxDefaultInitExprThis.getAlignment();
803 }
805 cgf.cxxThisValue = oldCXXThisValue;
806 cgf.cxxThisAlignment = oldCXXThisAlignment;
807 }
808
809 public:
811 mlir::Value oldCXXThisValue;
814 };
815
820
822 LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
823
824 /// Construct an address with the natural alignment of T. If a pointer to T
825 /// is expected to be signed, the pointer passed to this function must have
826 /// been signed, and the returned Address will have the pointer authentication
827 /// information needed to authenticate the signed pointer.
829 CharUnits alignment,
830 bool forPointeeType = false,
831 LValueBaseInfo *baseInfo = nullptr) {
832 if (alignment.isZero())
833 alignment = cgm.getNaturalTypeAlignment(t, baseInfo);
834 return Address(ptr, convertTypeForMem(t), alignment);
835 }
836
838 Address value, const CXXRecordDecl *derived,
839 llvm::iterator_range<CastExpr::path_const_iterator> path,
840 bool nullCheckValue, SourceLocation loc);
841
843 mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived,
844 llvm::iterator_range<CastExpr::path_const_iterator> path,
845 bool nullCheckValue);
846
847 /// Return the VTT parameter that should be passed to a base
848 /// constructor/destructor with virtual bases.
849 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
850 /// to ItaniumCXXABI.cpp together with all the references to VTT.
851 mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase,
852 bool delegating);
853
856 return makeAddrLValue(addr, ty, LValueBaseInfo(source));
857 }
858
860 return LValue::makeAddr(addr, ty, baseInfo);
861 }
862
863 void initializeVTablePointers(mlir::Location loc,
864 const clang::CXXRecordDecl *rd);
865 void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
866
868
869 /// Return the address of a local variable.
871 auto it = localDeclMap.find(vd);
872 assert(it != localDeclMap.end() &&
873 "Invalid argument to getAddrOfLocalVar(), no decl!");
874 return it->second;
875 }
876
878 mlir::Type fieldType, unsigned index);
879
880 /// Given an opaque value expression, return its LValue mapping if it exists,
881 /// otherwise create one.
883
884 /// Given an opaque value expression, return its RValue mapping if it exists,
885 /// otherwise create one.
887
888 /// Load the value for 'this'. This function is only valid while generating
889 /// code for an C++ member function.
890 /// FIXME(cir): this should return a mlir::Value!
891 mlir::Value loadCXXThis() {
892 assert(cxxThisValue && "no 'this' value for this function");
893 return cxxThisValue;
894 }
896
897 /// Load the VTT parameter to base constructors/destructors have virtual
898 /// bases. FIXME: Every place that calls LoadCXXVTT is something that needs to
899 /// be abstracted properly.
900 mlir::Value loadCXXVTT() {
901 assert(cxxStructorImplicitParamValue && "no VTT value for this function");
903 }
904
905 /// Convert the given pointer to a complete class to the given direct base.
907 Address value,
908 const CXXRecordDecl *derived,
909 const CXXRecordDecl *base,
910 bool baseIsVirtual);
911
912 /// Determine whether a return value slot may overlap some other object.
914 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
915 // class subobjects. These cases may need to be revisited depending on the
916 // resolution of the relevant core issue.
918 }
919
920 /// Determine whether a base class initialization may overlap some other
921 /// object.
923 const CXXRecordDecl *baseRD,
924 bool isVirtual);
925
926 /// Get an appropriate 'undef' rvalue for the given type.
927 /// TODO: What's the equivalent for MLIR? Currently we're only using this for
928 /// void types so it just returns RValue::get(nullptr) but it'll need
929 /// addressed later.
931
932 cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
933 cir::FuncType funcType);
934
936 FunctionArgList &args);
937
938 /// Emit the function prologue: declare function arguments in the symbol
939 /// table.
940 void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB,
941 const FunctionDecl *fd, SourceLocation bodyBeginLoc);
942
943 /// Emit code for the start of a function.
944 /// \param loc The location to be associated with the function.
945 /// \param startLoc The location of the function body.
947 cir::FuncOp fn, cir::FuncType funcType,
949 clang::SourceLocation startLoc);
950
951 /// returns true if aggregate type has a volatile member.
953 if (const auto *rd = t->getAsRecordDecl())
954 return rd->hasVolatileMember();
955 return false;
956 }
957
958 void addCatchHandlerAttr(const CXXCatchStmt *catchStmt,
959 SmallVector<mlir::Attribute> &handlerAttrs);
960
961 /// The cleanup depth enclosing all the cleanups associated with the
962 /// parameters.
964
966
967 /// Takes the old cleanup stack size and emits the cleanup blocks
968 /// that have been added.
969 void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth,
970 ArrayRef<mlir::Value *> valuesToReload = {});
971 void popCleanupBlock();
972
973 /// Deactivates the given cleanup block. The block cannot be reactivated. Pops
974 /// it if it's the top of the stack.
975 ///
976 /// \param DominatingIP - An instruction which is known to
977 /// dominate the current IP (if set) and which lies along
978 /// all paths of execution between the current IP and the
979 /// the point at which the cleanup comes into scope.
981 mlir::Operation *dominatingIP);
982
983 /// Push a cleanup to be run at the end of the current full-expression. Safe
984 /// against the possibility that we're currently inside a
985 /// conditionally-evaluated expression.
986 template <class T, class... As>
988 // If we're not in a conditional branch, or if none of the
989 // arguments requires saving, then use the unconditional cleanup.
991 return ehStack.pushCleanup<T>(kind, a...);
992
993 cgm.errorNYI("pushFullExprCleanup in conditional branch");
994 }
995
996 /// Enters a new scope for capturing cleanups, all of which
997 /// will be executed once the scope is exited.
998 class RunCleanupsScope {
999 EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
1000
1001 protected:
1004
1005 private:
1006 RunCleanupsScope(const RunCleanupsScope &) = delete;
1007 void operator=(const RunCleanupsScope &) = delete;
1008
1009 protected:
1011
1012 public:
1013 /// Enter a new cleanup scope.
1015 : performCleanup(true), cgf(cgf) {
1016 cleanupStackDepth = cgf.ehStack.stable_begin();
1017 oldDidCallStackSave = cgf.didCallStackSave;
1018 cgf.didCallStackSave = false;
1019 oldCleanupStackDepth = cgf.currentCleanupStackDepth;
1020 cgf.currentCleanupStackDepth = cleanupStackDepth;
1021 }
1022
1023 /// Exit this cleanup scope, emitting any accumulated cleanups.
1025 if (performCleanup)
1026 forceCleanup();
1027 }
1028
1029 /// Force the emission of cleanups now, instead of waiting
1030 /// until this object is destroyed.
1031 void forceCleanup(ArrayRef<mlir::Value *> valuesToReload = {}) {
1032 assert(performCleanup && "Already forced cleanup");
1034 cgf.popCleanupBlocks(cleanupStackDepth, valuesToReload);
1035 performCleanup = false;
1036 cgf.currentCleanupStackDepth = oldCleanupStackDepth;
1037 }
1038 };
1039
1040 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1042
1043public:
1044 /// Represents a scope, including function bodies, compound statements, and
1045 /// the substatements of if/while/do/for/switch/try statements. This class
1046 /// handles any automatic cleanup, along with the return value.
1047 struct LexicalScope : public RunCleanupsScope {
1048 private:
1049 // Block containing cleanup code for things initialized in this
1050 // lexical context (scope).
1051 mlir::Block *cleanupBlock = nullptr;
1052
1053 // Points to the scope entry block. This is useful, for instance, for
1054 // helping to insert allocas before finalizing any recursive CodeGen from
1055 // switches.
1056 mlir::Block *entryBlock;
1057
1058 LexicalScope *parentScope = nullptr;
1059
1060 // Holds the actual value for ScopeKind::Try
1061 cir::TryOp tryOp = nullptr;
1062
1063 // On a coroutine body, the OnFallthrough sub stmt holds the handler
1064 // (CoreturnStmt) for control flow falling off the body. Keep track
1065 // of emitted co_return in this scope and allow OnFallthrough to be
1066 // skipeed.
1067 bool hasCoreturnStmt = false;
1068
1069 // Only Regular is used at the moment. Support for other kinds will be
1070 // added as the relevant statements/expressions are upstreamed.
1071 enum Kind {
1072 Regular, // cir.if, cir.scope, if_regions
1073 Ternary, // cir.ternary
1074 Switch, // cir.switch
1075 Try, // cir.try
1076 GlobalInit // cir.global initialization code
1077 };
1078 Kind scopeKind = Kind::Regular;
1079
1080 // The scope return value.
1081 mlir::Value retVal = nullptr;
1082
1083 mlir::Location beginLoc;
1084 mlir::Location endLoc;
1085
1086 public:
1087 unsigned depth = 0;
1088
/// Enter a new lexical scope: links this scope into the scope chain,
/// makes it the CIRGenFunction's current scope, and records the scope's
/// begin/end source locations (split out of a FusedLoc when provided).
1089 LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
1090 : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
1091 beginLoc(loc), endLoc(loc) {
1092
1093 assert(entryBlock && "LexicalScope requires an entry block");
1094 cgf.curLexScope = this;
// NOTE(review): depth only distinguishes root (0) from nested (1) here;
// confirm whether it is meant to accumulate parentScope->depth.
1095 if (parentScope)
1096 ++depth;
1097
// A fused location is expected to carry exactly two locations:
// the scope's begin and end.
1098 if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
1099 assert(fusedLoc.getLocations().size() == 2 && "too many locations");
1100 beginLoc = fusedLoc.getLocations()[0];
1101 endLoc = fusedLoc.getLocations()[1];
1102 }
1103 }
1104
/// Record the scope return value.
1105 void setRetVal(mlir::Value v) { retVal = v; }
1106
/// Out-of-line cleanup hook; invoked from the scope's destructor.
1107 void cleanup();
/// Pop this scope: make the parent scope current again.
1108 void restore() { cgf.curLexScope = parentScope; }
1109
1112 cleanup();
1113 restore();
1114 }
1115
1116 // ---
1117 // Coroutine tracking
1118 // ---
// Whether a co_return has been emitted within this scope.
1119 bool hasCoreturn() const { return hasCoreturnStmt; }
1120 void setCoreturn() { hasCoreturnStmt = true; }
1121
1122 // ---
1123 // Kind
1124 // ---
1125 bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
1126 bool isRegular() { return scopeKind == Kind::Regular; }
1127 bool isSwitch() { return scopeKind == Kind::Switch; }
1128 bool isTernary() { return scopeKind == Kind::Ternary; }
1129 bool isTry() { return scopeKind == Kind::Try; }
// Defined out of line; presumably walks parent scopes for the nearest
// enclosing Try scope's TryOp — confirm against the .cpp.
1130 cir::TryOp getClosestTryParent();
1131 void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
1132 void setAsSwitch() { scopeKind = Kind::Switch; }
1133 void setAsTernary() { scopeKind = Kind::Ternary; }
// Try scopes also record the cir.try operation they correspond to.
1134 void setAsTry(cir::TryOp op) {
1135 scopeKind = Kind::Try;
1136 tryOp = op;
1137 }
1138
1139 // Lazily create the cleanup block, or return the existing one.
1140 mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
1141 if (cleanupBlock)
1142 return cleanupBlock;
1143 cleanupBlock = createCleanupBlock(builder);
1144 return cleanupBlock;
1145 }
1146
/// The TryOp recorded for this scope; only valid when isTry().
1147 cir::TryOp getTry() {
1148 assert(isTry());
1149 return tryOp;
1150 }
1151
/// Returns the cleanup block, which may be null if none has been
/// created yet (see getOrCreateCleanupBlock).
1152 mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
1153 return cleanupBlock;
1154 }
1155
1156 mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
1157 // Create the cleanup block but don't hook it up just yet.
1158 mlir::OpBuilder::InsertionGuard guard(builder);
// Insert into the current block's region, or fall back to the
// function's first region when the builder has no current block.
1159 mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
1160 : &cgf.curFn->getRegion(0);
1161 cleanupBlock = builder.createBlock(r);
1162 return cleanupBlock;
1163 }
1164
1165 // ---
1166 // Return handling.
1167 // ---
1168
1169 private:
1170 // On switches we need one return block per region, since cases don't
1171 // have their own scopes but are distinct regions nonetheless.
1172
1173 // TODO: This implementation should change once we have support for early
1174 // exits in MLIR structured control flow (llvm-project#161575)
1176 llvm::DenseMap<mlir::Block *, mlir::Location> retLocs;
1177 llvm::DenseMap<cir::CaseOp, unsigned> retBlockInCaseIndex;
1178 std::optional<unsigned> normalRetBlockIndex;
1179
1180 // There's usually only one ret block per scope, but creation must be
1181 // get-or-create because of potentially unreachable return statements;
1182 // for those, all source locations map to the first block found.
1183 mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
// Multiple return blocks are only allowed when emitting inside a
// cir.case region (i.e. within a switch).
1184 assert((isa_and_nonnull<cir::CaseOp>(
1185 cgf.builder.getBlock()->getParentOp()) ||
1186 retBlocks.size() == 0) &&
1187 "only switches can hold more than one ret block");
1188
1189 // Create the return block but don't hook it up just yet.
1190 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
1191 auto *b = cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
1192 retBlocks.push_back(b);
1193 updateRetLoc(b, loc);
1194 return b;
1195 }
1196
1197 cir::ReturnOp emitReturn(mlir::Location loc);
1198 void emitImplicitReturn();
1199
1200 public:
/// Source location for a return block; the block must already have been
/// registered via updateRetLoc (retLocs.at would fail otherwise).
1202 mlir::Location getRetLoc(mlir::Block *b) { return retLocs.at(b); }
/// Record (or overwrite) the source location for a return block.
1203 void updateRetLoc(mlir::Block *b, mlir::Location loc) {
1204 retLocs.insert_or_assign(b, loc);
1205 }
1206
/// Get (or lazily create) the return block for the current context:
/// one block per cir.case region when emitting inside a switch case,
/// otherwise a single shared return block for the scope. The block's
/// recorded source location is refreshed to \p loc on every request.
1207 mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
1208 // Check if we're inside a case region
1209 if (auto caseOp = mlir::dyn_cast_if_present<cir::CaseOp>(
1210 cgf.builder.getBlock()->getParentOp())) {
1211 auto iter = retBlockInCaseIndex.find(caseOp);
1212 if (iter != retBlockInCaseIndex.end()) {
1213 // Reuse existing return block
1214 mlir::Block *ret = retBlocks[iter->second];
1215 updateRetLoc(ret, loc);
1216 return ret;
1217 }
1218 // Create new return block
1219 mlir::Block *ret = createRetBlock(cgf, loc);
// Remember which slot in retBlocks belongs to this case.
1220 retBlockInCaseIndex[caseOp] = retBlocks.size() - 1;
1221 return ret;
1222 }
1223
// Outside a case: reuse the single normal return block if it exists.
1224 if (normalRetBlockIndex) {
1225 mlir::Block *ret = retBlocks[*normalRetBlockIndex];
1226 updateRetLoc(ret, loc);
1227 return ret;
1228 }
1229
1230 mlir::Block *ret = createRetBlock(cgf, loc);
1231 normalRetBlockIndex = retBlocks.size() - 1;
1232 return ret;
1233 }
1234
/// Returns the block this scope was entered with.
1235 mlir::Block *getEntryBlock() { return entryBlock; }
1236 };
1237
1239
1240 typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
1241
1243
1244 void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
1245 QualType type);
1246
1248 Destroyer *destroyer);
1249
1251
1252 /// Start generating a thunk function.
1253 void startThunk(cir::FuncOp fn, GlobalDecl gd,
1254 const CIRGenFunctionInfo &fnInfo, bool isUnprototyped);
1255
1256 /// Finish generating a thunk function.
1257 void finishThunk();
1258
1259 /// Generate code for a thunk function.
1260 void generateThunk(cir::FuncOp fn, const CIRGenFunctionInfo &fnInfo,
1261 GlobalDecl gd, const ThunkInfo &thunk,
1262 bool isUnprototyped);
1263
1264 /// ----------------------
1265 /// CIR emit functions
1266 /// ----------------------
1267public:
1268 bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr,
1270 clang::SVETypeFlags typeFlags);
1271 mlir::Value emitSVEPredicateCast(mlir::Value pred, unsigned minNumElts,
1272 mlir::Location loc);
1273 std::optional<mlir::Value>
1274 emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
1276 llvm::Triple::ArchType arch);
1277 std::optional<mlir::Value> emitAArch64SMEBuiltinExpr(unsigned builtinID,
1278 const CallExpr *expr);
1279 std::optional<mlir::Value> emitAArch64SVEBuiltinExpr(unsigned builtinID,
1280 const CallExpr *expr);
1281
1282 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
1283 SourceLocation loc,
1284 SourceLocation assumptionLoc,
1285 int64_t alignment,
1286 mlir::Value offsetValue = nullptr);
1287
1288 mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
1289 SourceLocation assumptionLoc,
1290 int64_t alignment,
1291 mlir::Value offsetValue = nullptr);
1292
1293private:
1294 void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
1295 clang::CharUnits alignment);
1296
1297 CIRGenCallee emitDirectCallee(const GlobalDecl &gd);
1298
1299public:
1301 llvm::StringRef fieldName,
1302 unsigned fieldIndex);
1303
1304 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1305 mlir::Location loc, clang::CharUnits alignment,
1306 bool insertIntoFnEntryBlock,
1307 mlir::Value arraySize = nullptr);
1308 mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty,
1309 mlir::Location loc, clang::CharUnits alignment,
1310 mlir::OpBuilder::InsertPoint ip,
1311 mlir::Value arraySize = nullptr);
1312
1313 void emitAggregateStore(mlir::Value value, Address dest);
1314
1315 void emitAggExpr(const clang::Expr *e, AggValueSlot slot);
1316
1318
1320
1321 /// Emit an aggregate copy.
1322 ///
1323 /// \param isVolatile \c true iff either the source or the destination is
1324 /// volatile.
1325 /// \param MayOverlap Whether the tail padding of the destination might be
1326 /// occupied by some other object. More efficient code can often be
1327 /// generated if not.
1328 void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
1329 AggValueSlot::Overlap_t mayOverlap,
1330 bool isVolatile = false);
1331
1332 /// Emit code to compute the specified expression which can have any type. The
1333 /// result is returned as an RValue struct. If this is an aggregate
1334 /// expression, the aggloc/agglocvolatile arguments indicate where the result
1335 /// should be returned.
1338 bool ignoreResult = false);
1339
1340 /// Emits the code necessary to evaluate an arbitrary expression into the
1341 /// given memory location.
1342 void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals,
1343 bool isInitializer);
1344
1345 /// Similarly to emitAnyExpr(), however, the result will always be accessible
1346 /// even if no aggregate location is provided.
1348
1349 void emitAnyExprToExn(const Expr *e, Address addr);
1350
1351 void emitArrayDestroy(mlir::Value begin, mlir::Value numElements,
1352 QualType elementType, CharUnits elementAlign,
1353 Destroyer *destroyer);
1354
1355 mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
1356 QualType &baseType, Address &addr);
1358
1360
1362 LValueBaseInfo *baseInfo = nullptr);
1363
1364 std::pair<mlir::Value, mlir::Type>
1366 QualType inputType, std::string &constraintString,
1367 SourceLocation loc);
1368 std::pair<mlir::Value, mlir::Type>
1369 emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr,
1370 std::string &constraintString);
1371 mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
1372
1374 void emitAtomicInit(Expr *init, LValue dest);
1375 void emitAtomicStore(RValue rvalue, LValue dest, bool isInit);
1376 void emitAtomicStore(RValue rvalue, LValue dest, cir::MemOrder order,
1377 bool isVolatile, bool isInit);
1379 const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
1380 llvm::function_ref<void(cir::MemOrder)> emitAtomicOp);
1381
1382 mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s);
1383
1384 AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
1385 mlir::OpBuilder::InsertPoint ip = {});
1386
1387 /// Emit code and set up symbol table for a variable declaration with auto,
1388 /// register, or no storage class specifier. These turn into simple stack
1389 /// objects, globals depending on target.
1390 void emitAutoVarDecl(const clang::VarDecl &d);
1391
1392 void emitAutoVarCleanups(const AutoVarEmission &emission);
1393 /// Emit the initializer for an allocated variable. If this call is not
1394 /// associated with the call to emitAutoVarAlloca (as the address of the
1395 /// emission is not directly an alloca), the allocatedSeparately parameter can
1396 /// be used to suppress the assertions. However, this should only be used in
1397 /// extreme cases, as it doesn't properly reflect the language/AST.
1398 void emitAutoVarInit(const AutoVarEmission &emission);
1399 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1401
1402 void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
1403
1404 void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
1405 CXXCtorInitializer *baseInit);
1406
1408
1409 mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
1410
1411 RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
1412 const clang::CallExpr *e, ReturnValueSlot returnValue);
1413
1414 /// Returns a Value corresponding to the size of the given expression by
1415 /// emitting a `cir.objsize` operation.
1416 ///
1417 /// \param e The expression whose object size to compute
1418 /// \param type Determines the semantics of the object size computation.
1419 /// The type parameter is a 2-bit value where:
1420 /// bit 0 (type & 1): 0 = whole object, 1 = closest subobject
1421 /// bit 1 (type & 2): 0 = maximum size, 2 = minimum size
1422 /// \param resType The result type for the size value
1423 /// \param emittedE Optional pre-emitted pointer value. If non-null, we'll
1424 /// call `cir.objsize` on this value rather than emitting e.
1425 /// \param isDynamic If true, allows runtime evaluation via dynamic mode
1426 mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type,
1427 cir::IntType resType, mlir::Value emittedE,
1428 bool isDynamic);
1429
1430 mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e,
1431 unsigned type,
1432 cir::IntType resType,
1433 mlir::Value emittedE,
1434 bool isDynamic);
1435
1436 int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts);
1437
1439
1440 RValue emitCall(const CIRGenFunctionInfo &funcInfo,
1441 const CIRGenCallee &callee, ReturnValueSlot returnValue,
1442 const CallArgList &args, cir::CIRCallOpInterface *callOp,
1443 mlir::Location loc);
1446 const CallArgList &args,
1447 cir::CIRCallOpInterface *callOrTryCall = nullptr) {
1448 assert(currSrcLoc && "source location must have been set");
1449 return emitCall(funcInfo, callee, returnValue, args, callOrTryCall,
1450 *currSrcLoc);
1451 }
1452
1453 RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
1455
1456 /// Emit the call and return for a thunk function.
1457 void emitCallAndReturnForThunk(cir::FuncOp callee, const ThunkInfo *thunk,
1458 bool isUnprototyped);
1459
1460 void emitCallArg(CallArgList &args, const clang::Expr *e,
1461 clang::QualType argType);
1462 void emitCallArgs(
1463 CallArgList &args, PrototypeWrapper prototype,
1464 llvm::iterator_range<clang::CallExpr::const_arg_iterator> argRange,
1465 AbstractCallee callee = AbstractCallee(), unsigned paramsToSkip = 0);
1470
1471 template <typename T>
1472 mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
1473 mlir::ArrayAttr value,
1474 cir::CaseOpKind kind,
1475 bool buildingTopLevelCase);
1476
1478
1479 mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s,
1480 mlir::Type condType,
1481 bool buildingTopLevelCase);
1482
1483 LValue emitCastLValue(const CastExpr *e);
1484
1485 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
1486 /// sanitizer is enabled, a runtime check is also emitted.
1487 mlir::Value emitCheckedArgForAssume(const Expr *e);
1488
1489 /// Emit a conversion from the specified complex type to the specified
1490 /// destination type, where the destination type is an LLVM scalar type.
1491 mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
1492 QualType dstTy, SourceLocation loc);
1493
1496
1498
1499 mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
1500 cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1501 cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
1502 cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
1503 cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
1504 mlir::Value coroframeAddr);
1506
1507 void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
1508
1510
1511 mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
1512
1513 mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s);
1514
1516 AggValueSlot dest);
1517
1520 Address arrayBegin, const CXXConstructExpr *e,
1521 bool newPointerIsChecked,
1522 bool zeroInitialize = false);
1524 mlir::Value numElements, Address arrayBase,
1525 const CXXConstructExpr *e,
1526 bool newPointerIsChecked,
1527 bool zeroInitialize);
1529 clang::CXXCtorType type, bool forVirtualBase,
1530 bool delegating, AggValueSlot thisAVS,
1531 const clang::CXXConstructExpr *e);
1532
1534 clang::CXXCtorType type, bool forVirtualBase,
1535 bool delegating, Address thisAddr,
1537
1538 void emitCXXDeleteExpr(const CXXDeleteExpr *e);
1539
1541 bool forVirtualBase, bool delegating,
1542 Address thisAddr, QualType thisTy);
1543
1545 mlir::Value thisVal, QualType thisTy,
1546 mlir::Value implicitParam,
1547 QualType implicitParamTy, const CallExpr *e);
1548
1549 mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
1551
1554
1556 const Expr *e, Address base, mlir::Value memberPtr,
1557 const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo);
1558
1560 const clang::CXXMethodDecl *md, const CIRGenCallee &callee,
1561 ReturnValueSlot returnValue, mlir::Value thisPtr,
1562 mlir::Value implicitParam, clang::QualType implicitParamTy,
1563 const clang::CallExpr *ce, CallArgList *rtlArgs);
1564
1566 const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
1567 ReturnValueSlot returnValue, bool hasQualifier,
1568 clang::NestedNameSpecifier qualifier, bool isArrow,
1569 const clang::Expr *base);
1570
1573
1574 mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
1575
1576 void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType,
1577 mlir::Type elementTy, Address beginPtr,
1578 mlir::Value numElements,
1579 mlir::Value allocSizeWithoutCookie);
1580
1582 const CXXMethodDecl *md,
1584
1587
1589
1591 const CallExpr *callExpr,
1593
1594 void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType,
1595 Address ptr);
1596
1597 void emitCXXThrowExpr(const CXXThrowExpr *e);
1598
1599 mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
1600
1602 clang::CXXCtorType ctorType, FunctionArgList &args);
1603
1604 // It's important not to confuse this and emitDelegateCXXConstructorCall.
1605 // Delegating constructors are the C++11 feature. The constructor delegate
1606 // optimization is used to reduce duplication in the base and complete
1607 // constructors where they are substantially the same.
1609 const FunctionArgList &args);
1610
1611 void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr,
1612 QualType deleteTy);
1613
1614 mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
1615
1616 mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e);
1617 mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce);
1618
1619 /// Emit an expression as an initializer for an object (variable, field, etc.)
1620 /// at the given location. The expression is not necessarily the normal
1621 /// initializer for the object, and the address is not necessarily
1622 /// its normal location.
1623 ///
1624 /// \param init the initializing expression
1625 /// \param d the object to act as if we're initializing
1626 /// \param lvalue the lvalue to initialize
1627 /// \param capturedByInit true if \p d is a __block variable whose address is
1628 /// potentially changed by the initializer
1629 void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d,
1630 LValue lvalue, bool capturedByInit = false);
1631
1632 mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
1633
1634 mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
1635
1636 mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s);
1637
1639
1641 clang::Expr *init);
1642
1644
1645 mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);
1646
1647 mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
1648
1649 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
1650
1651 void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty);
1652
1653 mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee,
1655 mlir::NamedAttrList attrs = {});
1656
1657 void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc);
1658
1659 /// Emit the computation of the specified expression of scalar type.
1660 mlir::Value emitScalarExpr(const clang::Expr *e,
1661 bool ignoreResultAssign = false);
1662
1663 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
1664 cir::UnaryOpKind kind, bool isPre);
1665
1666 /// Build a debug stoppoint if we are emitting debug info.
1667 void emitStopPoint(const Stmt *s);
1668
1669 // Build CIR for a statement. useCurrentScope should be true if no
1670 // new scopes need be created when finding a compound statement.
1671 mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope,
1672 llvm::ArrayRef<const Attr *> attrs = {});
1673
1674 mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s,
1675 bool useCurrentScope);
1676
1677 mlir::LogicalResult emitForStmt(const clang::ForStmt &s);
1678
1679 void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator,
1680 CallArgList &callArgs);
1681
1682 RValue emitCoawaitExpr(const CoawaitExpr &e,
1683 AggValueSlot aggSlot = AggValueSlot::ignored(),
1684 bool ignoreResult = false);
1685
1686 RValue emitCoyieldExpr(const CoyieldExpr &e,
1687 AggValueSlot aggSlot = AggValueSlot::ignored(),
1688 bool ignoreResult = false);
1689 /// Emit the computation of the specified expression of complex type,
1690 /// returning the result.
1691 mlir::Value emitComplexExpr(const Expr *e);
1692
1693 void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit);
1694
1695 mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv,
1696 cir::UnaryOpKind op, bool isPre);
1697
1698 LValue emitComplexAssignmentLValue(const BinaryOperator *e);
1699 LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
1700 LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e,
1701 mlir::Value &result);
1702
1703 mlir::LogicalResult
1704 emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
1705 AggValueSlot slot = AggValueSlot::ignored());
1706
1707 mlir::LogicalResult
1709 Address *lastValue = nullptr,
1710 AggValueSlot slot = AggValueSlot::ignored());
1711
1712 void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
1713 mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
1714 LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
1715
1716 mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s,
1717 mlir::Type condType,
1718 bool buildingTopLevelCase);
1719
1721 clang::CXXCtorType ctorType,
1722 const FunctionArgList &args,
1724
1725 /// We are performing a delegate call; that is, the current function is
1726 /// delegating to another one. Produce a r-value suitable for passing the
1727 /// given parameter.
1728 void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
1730
1731 /// Emit an `if` on a boolean condition to the specified blocks.
1732 /// FIXME: Based on the condition, this might try to simplify the codegen of
1733 /// the conditional based on the branch.
1734 /// In the future, we may apply code generation simplifications here,
1735 /// similar to those used in classic LLVM codegen
1736 /// See `EmitBranchOnBoolExpr` for inspiration.
1737 mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
1738 const clang::Stmt *thenS,
1739 const clang::Stmt *elseS);
1740 cir::IfOp emitIfOnBoolExpr(const clang::Expr *cond,
1741 BuilderCallbackRef thenBuilder,
1742 mlir::Location thenLoc,
1743 BuilderCallbackRef elseBuilder,
1744 std::optional<mlir::Location> elseLoc = {});
1745
1746 mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
1747
1748 LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e);
1749
1750 mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
1751 mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
1752
1753 void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md);
1754 void emitLambdaStaticInvokeBody(const CXXMethodDecl *md);
1755
1756 mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
1757
1758 /// Emit code to compute the specified expression,
1759 /// ignoring the result.
1760 void emitIgnoredExpr(const clang::Expr *e);
1761
1762 RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc);
1763
1764 /// Load a complex number from the specified l-value.
1765 mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc);
1766
1767 RValue emitLoadOfExtVectorElementLValue(LValue lv);
1768
1769 /// Given an expression that represents a value lvalue, this method emits
1770 /// the address of the lvalue, then loads the result as an rvalue,
1771 /// returning the rvalue.
1772 RValue emitLoadOfLValue(LValue lv, SourceLocation loc);
1773
1774 Address emitLoadOfReference(LValue refLVal, mlir::Location loc,
1775 LValueBaseInfo *pointeeBaseInfo);
1776 LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc,
1777 QualType refTy, AlignmentSource source);
1778
1779 /// EmitLoadOfScalar - Load a scalar value from an address, taking
1780 /// care to appropriately convert from the memory representation to
1781 /// the LLVM value representation. The l-value must be a simple
1782 /// l-value.
1783 mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc);
1784 mlir::Value emitLoadOfScalar(Address addr, bool isVolatile, QualType ty,
1785 SourceLocation loc, LValueBaseInfo baseInfo);
1786
1787 /// Emit code to compute a designator that specifies the location
1788 /// of the expression.
1789 /// FIXME: document this function better.
1790 LValue emitLValue(const clang::Expr *e);
1791 LValue emitLValueForBitField(LValue base, const FieldDecl *field);
1792 LValue emitLValueForField(LValue base, const clang::FieldDecl *field);
1793
1794 LValue emitLValueForLambdaField(const FieldDecl *field);
1795 LValue emitLValueForLambdaField(const FieldDecl *field,
1796 mlir::Value thisValue);
1797
1798 /// Like emitLValueForField, except that if the field is a reference, this
1799 /// will return the address of the reference and not the address of the value
1800 /// stored in the reference.
1801 LValue emitLValueForFieldInitialization(LValue base,
1802 const clang::FieldDecl *field,
1803 llvm::StringRef fieldName);
1804
1805 LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
1806
1807 LValue emitMemberExpr(const MemberExpr *e);
1808
1809 /// Emit a musttail call for a thunk with a potentially different ABI.
1810 void emitMustTailThunk(GlobalDecl gd, mlir::Value adjustedThisPtr,
1811 cir::FuncOp callee);
1812
1813 /// Emit a call to an AMDGPU builtin function.
1814 std::optional<mlir::Value> emitAMDGPUBuiltinExpr(unsigned builtinID,
1815 const CallExpr *expr);
1816
1817 LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
1818
1819 LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
1820
1821 /// Given an expression with a pointer type, emit the value and compute our
1822 /// best estimate of the alignment of the pointee.
1823 ///
1824 /// One reasonable way to use this information is when there's a language
1825 /// guarantee that the pointer must be aligned to some stricter value, and
1826 /// we're simply trying to ensure that sufficiently obvious uses of under-
1827 /// aligned objects don't get miscompiled; for example, a placement new
1828 /// into the address of a local variable. In such a case, it's quite
1829 /// reasonable to just ignore the returned alignment when it isn't from an
1830 /// explicit source.
1831 Address emitPointerWithAlignment(const clang::Expr *expr,
1832 LValueBaseInfo *baseInfo = nullptr);
1833
1834 /// Emits a reference binding to the passed in expression.
1835 RValue emitReferenceBindingToExpr(const Expr *e);
1836
1837 mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s);
1838
1839 RValue emitRotate(const CallExpr *e, bool isRotateLeft);
1840
1841 mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e);
1842
1843 /// Emit a conversion from the specified type to the specified destination
1844 /// type, both of which are CIR scalar types.
1845 mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType,
1846 clang::QualType dstType,
1847 clang::SourceLocation loc);
1848
1849 void emitScalarInit(const clang::Expr *init, mlir::Location loc,
1850 LValue lvalue, bool capturedByInit = false);
1851
1852 mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx,
1853 const Expr *argExpr);
1854
1855 void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage);
1856
1857 /// Emit a guarded initializer for a static local variable or a static
1858 /// data member of a class template instantiation.
1859 void emitCXXGuardedInit(const VarDecl &varDecl, cir::GlobalOp globalOp,
1860 bool performInit);
1861
1862 void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest,
1863 bool isInit);
1864
1865 void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
1866 clang::QualType ty, LValueBaseInfo baseInfo,
1867 bool isInit = false, bool isNontemporal = false);
1868 void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
1869
1870 /// Store the specified rvalue into the specified
1871 /// lvalue, where both are guaranteed to have the same type, and that type
1872 /// is 'Ty'.
1873 void emitStoreThroughLValue(RValue src, LValue dst, bool isInit = false);
1874
1875 mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult);
1876
1877 LValue emitStringLiteralLValue(const StringLiteral *e,
1878 llvm::StringRef name = ".str");
1879
1880 mlir::LogicalResult emitSwitchBody(const clang::Stmt *s);
1881 mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s,
1882 bool buildingTopLevelCase);
1883 mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
1884
1885 std::optional<mlir::Value>
1886 emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e,
1887 ReturnValueSlot &returnValue);
1888
1889 /// Given a value and its clang type, returns the value casted to its memory
1890 /// representation.
1891 /// Note: CIR defers most of the special casting to the final lowering passes
1892 /// to conserve the high level information.
1893 mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);
1894
1895 /// EmitFromMemory - Change a scalar value from its memory
1896 /// representation to its value representation.
1897 mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty);
1898
1899 /// Emit a trap instruction, which is used to abort the program in an abnormal
1900 /// way, usually for debugging purposes.
1901 /// \p createNewBlock indicates whether to create a new block for the IR
1902 /// builder. Since the `cir.trap` operation is a terminator, operations that
1903 /// follow a trap cannot be emitted after `cir.trap` in the same block. To
1904 /// ensure these operations get emitted successfully, you need to create a new
1905 /// dummy block and set the insertion point there before continuing from the
1906 /// trap operation.
1907 void emitTrap(mlir::Location loc, bool createNewBlock);
1908
1909 LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
1910
1911 mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
1912
1913 /// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
1914 /// checking is enabled. Otherwise, just emit an unreachable instruction.
1915 /// \p createNewBlock indicates whether to create a new block for the IR
1916 /// builder. Since the `cir.unreachable` operation is a terminator, operations
1917 /// that follow an unreachable point cannot be emitted after `cir.unreachable`
1918 /// in the same block. To ensure these operations get emitted successfully,
1919 /// you need to create a dummy block and set the insertion point there before
1920 /// continuing from the unreachable point.
1921 void emitUnreachable(clang::SourceLocation loc, bool createNewBlock);
1922
1923 /// This method handles emission of any variable declaration
1924 /// inside a function, including static vars etc.
1925 void emitVarDecl(const clang::VarDecl &d);
1926
1927 void emitVariablyModifiedType(QualType ty);
1928
1929 mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
1930
1931 std::optional<mlir::Value> emitX86BuiltinExpr(unsigned builtinID,
1932 const CallExpr *expr);
1933
1934 /// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
1936 /// nonnull, if \p lhs is marked _Nonnull.
1936 void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
1937 clang::SourceLocation loc);
1938
1939 /// An object to manage conditionally-evaluated expressions.
1941 CIRGenFunction &cgf;
1942 mlir::OpBuilder::InsertPoint insertPt;
1943
1944 public:
1946 : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {}
1947 ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
1948 : cgf(cgf), insertPt(ip) {}
1949
1951 assert(cgf.outermostConditional != this);
1952 if (!cgf.outermostConditional)
1953 cgf.outermostConditional = this;
1954 }
1955
1957 assert(cgf.outermostConditional != nullptr);
1958 if (cgf.outermostConditional == this)
1959 cgf.outermostConditional = nullptr;
1960 }
1961
1962 /// Returns the insertion point which will be executed prior to each
1963 /// evaluation of the conditional code. In LLVM OG, this method
1964 /// is called getStartingBlock.
1965 mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; }
1966 };
1967
1969 std::optional<LValue> lhs{}, rhs{};
1970 mlir::Value result{};
1971 };
1972
1973 // Return true if we're currently emitting one branch or the other of a
1974 // conditional expression.
1975 bool isInConditionalBranch() const { return outermostConditional != nullptr; }
1976
 1977  // Emits a store of `value` into `addr` at the insertion point saved by the
 1977  // outermost active ConditionalEvaluation, so the store executes
 1977  // unconditionally, before either arm of the conditional is evaluated.
 1977  // Requires an active conditional (see isInConditionalBranch()).
 1977  void setBeforeOutermostConditional(mlir::Value value, Address addr) {
 1978    assert(isInConditionalBranch());
 1979    {
 1980      // Guard restores the builder's current insertion point on scope exit.
 1980      mlir::OpBuilder::InsertionGuard guard(builder);
 1981      builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
 1982      // Non-volatile store; alignment is taken from the destination address
 1982      // and encoded as an i64 attribute on the store.
 1982      builder.createStore(
 1983          value.getLoc(), value, addr, /*isVolatile=*/false,
 1984          mlir::IntegerAttr::get(
 1985              mlir::IntegerType::get(value.getContext(), 64),
 1986              (uint64_t)addr.getAlignment().getAsAlign().value()));
 1987    }
 1988  }
1989
1990 // Points to the outermost active conditional control. This is used so that
1991 // we know if a temporary should be destroyed conditionally.
1993
1994 /// An RAII object to record that we're evaluating a statement
1995 /// expression.
1997 CIRGenFunction &cgf;
1998
1999 /// We have to save the outermost conditional: cleanups in a
2000 /// statement expression aren't conditional just because the
2001 /// StmtExpr is.
2002 ConditionalEvaluation *savedOutermostConditional;
2003
2004 public:
2006 : cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
2007 cgf.outermostConditional = nullptr;
2008 }
2009
2011 cgf.outermostConditional = savedOutermostConditional;
2012 }
2013 };
2014
2015 template <typename FuncTy>
2016 ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
2017 const FuncTy &branchGenFunc);
2018
2019 mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
2020 const clang::Stmt *thenS,
2021 const clang::Stmt *elseS);
2022
2023 /// Build a "reference" to a va_list; this is either the address or the value
2024 /// of the expression, depending on how va_list is defined.
2025 Address emitVAListRef(const Expr *e);
2026
2027 /// Emits the start of a CIR variable-argument operation (`cir.va_start`)
2028 ///
2029 /// \param vaList A reference to the \c va_list as emitted by either
2030 /// \c emitVAListRef or \c emitMSVAListRef.
2031 void emitVAStart(mlir::Value vaList);
2032
 2033  /// Emits the end of a CIR variable-argument operation (`cir.va_end`)
2034 ///
2035 /// \param vaList A reference to the \c va_list as emitted by either
2036 /// \c emitVAListRef or \c emitMSVAListRef.
2037 void emitVAEnd(mlir::Value vaList);
2038
2039 /// Generate code to get an argument from the passed in pointer
2040 /// and update it accordingly.
2041 ///
2042 /// \param ve The \c VAArgExpr for which to generate code.
2043 ///
2044 /// \param vaListAddr Receives a reference to the \c va_list as emitted by
2045 /// either \c emitVAListRef or \c emitMSVAListRef.
2046 ///
2047 /// \returns SSA value with the argument.
2048 mlir::Value emitVAArg(VAArgExpr *ve);
2049
2050 /// ----------------------
2051 /// CIR build helpers
2052 /// -----------------
2053public:
2054 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2055 const Twine &name = "tmp",
2056 mlir::Value arraySize = nullptr,
2057 bool insertIntoFnEntryBlock = false);
2058 cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
2059 const Twine &name = "tmp",
2060 mlir::OpBuilder::InsertPoint ip = {},
2061 mlir::Value arraySize = nullptr);
2062 Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
2063 const Twine &name = "tmp",
2064 mlir::Value arraySize = nullptr,
2065 Address *alloca = nullptr,
2066 mlir::OpBuilder::InsertPoint ip = {});
2067 Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
2068 mlir::Location loc,
2069 const Twine &name = "tmp",
2070 mlir::Value arraySize = nullptr,
2071 mlir::OpBuilder::InsertPoint ip = {});
2072 Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc,
2073 const Twine &name);
2074
2075 /// Create a temporary memory object of the given type, with
 2076  /// appropriate alignment and cast it to the default address space. Returns
2077 /// the original alloca instruction by \p Alloca if it is not nullptr.
2078 Address createMemTemp(QualType t, mlir::Location loc,
2079 const Twine &name = "tmp", Address *alloca = nullptr,
2080 mlir::OpBuilder::InsertPoint ip = {});
2081 Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
2082 const Twine &name = "tmp", Address *alloca = nullptr,
2083 mlir::OpBuilder::InsertPoint ip = {});
2084
 2085  // Casts pointer value \p v to type \p destTy via `createAddrSpaceCast`.
 2085  // Values produced directly by a cir::GlobalOp are not yet supported and
 2085  // trigger an errorNYI diagnostic (the cast is still emitted afterwards).
 2085  mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const {
 2086    if (cir::GlobalOp globalOp = v.getDefiningOp<cir::GlobalOp>())
 2087      cgm.errorNYI("Global op addrspace cast");
 2088    return builder.createAddrSpaceCast(v, destTy);
 2089  }
2090
2091 //===--------------------------------------------------------------------===//
2092 // OpenMP Emission
2093 //===--------------------------------------------------------------------===//
2094public:
2095 mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s);
2096 mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s);
2097 mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s);
2098 mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s);
2099 mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s);
2100 mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s);
2101 mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s);
2102 mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s);
2103 mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s);
2104 mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s);
2105 mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s);
2106 mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s);
2107 mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s);
2108 mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s);
2109 mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s);
2110 mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s);
2111 mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s);
2112 mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s);
2113 mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s);
2114 mlir::LogicalResult
2115 emitOMPParallelForDirective(const OMPParallelForDirective &s);
2116 mlir::LogicalResult
2117 emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s);
2118 mlir::LogicalResult
2119 emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s);
2120 mlir::LogicalResult
2121 emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s);
2122 mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s);
2123 mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s);
2124 mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s);
2125 mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s);
2126 mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s);
2127 mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s);
2128 mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s);
2129 mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s);
2130 mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s);
2131 mlir::LogicalResult
2133 mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s);
2134 mlir::LogicalResult
2136 mlir::LogicalResult
2138 mlir::LogicalResult
2140 mlir::LogicalResult
2142 mlir::LogicalResult
2144 mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s);
2145 mlir::LogicalResult
2147 mlir::LogicalResult
2149 mlir::LogicalResult
2151 mlir::LogicalResult
2153 mlir::LogicalResult
2155 mlir::LogicalResult
2157 mlir::LogicalResult
2158 emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s);
2159 mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(
2163 mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(
2167 mlir::LogicalResult
2169 mlir::LogicalResult emitOMPDistributeParallelForDirective(
2173 mlir::LogicalResult
2177 mlir::LogicalResult emitOMPTargetParallelForSimdDirective(
2179 mlir::LogicalResult
2181 mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(
2183 mlir::LogicalResult
2185 mlir::LogicalResult
2187 mlir::LogicalResult
2193 mlir::LogicalResult
2195 mlir::LogicalResult
2197 mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(
2205 mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s);
2206 mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s);
2207 mlir::LogicalResult
2209 mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s);
2210 mlir::LogicalResult
2212 mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s);
2213 mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s);
2214 mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s);
2215
2219 void emitOMPAllocateDecl(const OMPAllocateDecl &d);
2222 void emitOMPRequiresDecl(const OMPRequiresDecl &d);
2223
2224private:
2225 template <typename Op>
2226 void emitOpenMPClauses(Op &op, ArrayRef<const OMPClause *> clauses);
2227
2228 //===--------------------------------------------------------------------===//
2229 // OpenACC Emission
2230 //===--------------------------------------------------------------------===//
2231private:
2232 template <typename Op>
2233 Op emitOpenACCOp(mlir::Location start, OpenACCDirectiveKind dirKind,
2235 // Function to do the basic implementation of an operation with an Associated
2236 // Statement. Models AssociatedStmtConstruct.
2237 template <typename Op, typename TermOp>
2238 mlir::LogicalResult
2239 emitOpenACCOpAssociatedStmt(mlir::Location start, mlir::Location end,
2240 OpenACCDirectiveKind dirKind,
2242 const Stmt *associatedStmt);
2243
2244 template <typename Op, typename TermOp>
2245 mlir::LogicalResult emitOpenACCOpCombinedConstruct(
2246 mlir::Location start, mlir::Location end, OpenACCDirectiveKind dirKind,
2247 llvm::ArrayRef<const OpenACCClause *> clauses, const Stmt *loopStmt);
2248
2249 template <typename Op>
2250 void emitOpenACCClauses(Op &op, OpenACCDirectiveKind dirKind,
2252 // The second template argument doesn't need to be a template, since it should
2253 // always be an mlir::acc::LoopOp, but as this is a template anyway, we make
2254 // it a template argument as this way we can avoid including the OpenACC MLIR
2255 // headers here. We will count on linker failures/explicit instantiation to
2256 // ensure we don't mess this up, but it is only called from 1 place, and
2257 // instantiated 3x.
2258 template <typename ComputeOp, typename LoopOp>
2259 void emitOpenACCClauses(ComputeOp &op, LoopOp &loopOp,
2260 OpenACCDirectiveKind dirKind,
2262
2263 // The OpenACC LoopOp requires that we have auto, seq, or independent on all
2264 // LoopOp operations for the 'none' device type case. This function checks if
2265 // the LoopOp has one, else it updates it to have one.
2266 void updateLoopOpParallelism(mlir::acc::LoopOp &op, bool isOrphan,
2268
2269 // The OpenACC 'cache' construct actually applies to the 'loop' if present. So
2270 // keep track of the 'loop' so that we can add the cache vars to it correctly.
2271 mlir::acc::LoopOp *activeLoopOp = nullptr;
2272
 2273  // RAII helper: installs \p newOp as the function's active OpenACC LoopOp
 2273  // for the lifetime of this object and restores the previous value on
 2273  // destruction. Used so nested 'loop' constructs see the innermost loop.
 2273  struct ActiveOpenACCLoopRAII {
 2274    CIRGenFunction &cgf;
 2275    // Saved value of cgf.activeLoopOp, restored by the destructor.
 2275    mlir::acc::LoopOp *oldLoopOp;
 2276 
 2277    ActiveOpenACCLoopRAII(CIRGenFunction &cgf, mlir::acc::LoopOp *newOp)
 2278        : cgf(cgf), oldLoopOp(cgf.activeLoopOp) {
 2279      cgf.activeLoopOp = newOp;
 2280    }
 2281    ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; }
 2282  };
2283
2284 // Keep track of the last place we inserted a 'recipe' so that we can insert
2285 // the next one in lexical order.
2286 mlir::OpBuilder::InsertPoint lastRecipeLocation;
2287
2288public:
2289 // Helper type used to store the list of important information for a 'data'
2290 // clause variable, or a 'cache' variable reference.
2292 mlir::Location beginLoc;
2293 mlir::Value varValue;
2294 std::string name;
2295 // The type of the original variable reference: that is, after 'bounds' have
2296 // removed pointers/array types/etc. So in the case of int arr[5], and a
2297 // private(arr[1]), 'origType' is 'int', but 'baseType' is 'int[5]'.
2301 // The list of types that we found when going through the bounds, which we
2302 // can use to properly set the alloca section.
2304 };
2305
 2306  // Gets the collection of info required to lower an OpenACC clause or cache
 2307  // construct variable reference.
2309 // Helper function to emit the integer expressions as required by an OpenACC
2310 // clause/construct.
2311 mlir::Value emitOpenACCIntExpr(const Expr *intExpr);
2312 // Helper function to emit an integer constant as an mlir int type, used for
2313 // constants in OpenACC constructs/clauses.
2314 mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width,
2315 int64_t value);
2316
2317 mlir::LogicalResult
2319 mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s);
2320 mlir::LogicalResult
2322 mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s);
2323 mlir::LogicalResult
2325 mlir::LogicalResult
2327 mlir::LogicalResult
2329 mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s);
2330 mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s);
2331 mlir::LogicalResult
2333 mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s);
2334 mlir::LogicalResult
2336 mlir::LogicalResult
2338 mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s);
2339
2342
2343 /// Create a temporary memory object for the given aggregate type.
2344 AggValueSlot createAggTemp(QualType ty, mlir::Location loc,
2345 const Twine &name = "tmp",
2346 Address *alloca = nullptr) {
2348 return AggValueSlot::forAddr(
2349 createMemTemp(ty, loc, name, alloca), ty.getQualifiers(),
2352 }
2353
2354private:
2355 QualType getVarArgType(const Expr *arg);
2356};
2357
2358} // namespace clang::CIRGen
2359
2360#endif
Defines the clang::ASTContext interface.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, Address ptr, Address val1, Address val2, Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size, cir::MemOrder order, cir::SyncScopeKind scope)
Defines the clang::Expr interface and subclasses for C++ expressions.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
Defines an enumeration for C++ overloaded operators.
Enumerates target-specific builtins in their own namespaces within namespace clang.
C Language Family Type Representation.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
This represents 'pragma omp cancel' directive.
This represents 'pragma omp cancellation point' directive.
This represents 'pragma omp dispatch' directive.
This represents 'pragma omp distribute' directive.
This represents 'pragma omp distribute parallel for' composite directive.
This represents 'pragma omp distribute parallel for simd' composite directive.
This represents 'pragma omp distribute simd' composite directive.
This represents 'pragma omp error' directive.
Represents the 'pragma omp fuse' loop transformation directive.
This represents 'pragma omp loop' directive.
Represents the 'pragma omp interchange' loop transformation directive.
This represents 'pragma omp interop' directive.
This represents 'pragma omp masked' directive.
This represents 'pragma omp masked taskloop' directive.
This represents 'pragma omp masked taskloop simd' directive.
This represents 'pragma omp master taskloop' directive.
This represents 'pragma omp master taskloop simd' directive.
This represents 'pragma omp metadirective' directive.
This represents 'pragma omp parallel loop' directive.
This represents 'pragma omp parallel masked taskloop' directive.
This represents 'pragma omp parallel masked taskloop simd' directive.
This represents 'pragma omp parallel master taskloop' directive.
This represents 'pragma omp parallel master taskloop simd' directive.
Represents the 'pragma omp reverse' loop transformation directive.
This represents 'pragma omp scan' directive.
This represents the 'pragma omp stripe' loop transformation directive.
This represents 'pragma omp target data' directive.
This represents 'pragma omp target' directive.
This represents 'pragma omp target enter data' directive.
This represents 'pragma omp target exit data' directive.
This represents 'pragma omp target parallel' directive.
This represents 'pragma omp target parallel for' directive.
This represents 'pragma omp target parallel for simd' directive.
This represents 'pragma omp target parallel loop' directive.
This represents 'pragma omp target simd' directive.
This represents 'pragma omp target teams' directive.
This represents 'pragma omp target teams distribute' combined directive.
This represents 'pragma omp target teams distribute parallel for' combined directive.
This represents 'pragma omp target teams distribute parallel for simd' combined directive.
This represents 'pragma omp target teams distribute simd' combined directive.
This represents 'pragma omp target teams loop' directive.
This represents 'pragma omp target update' directive.
This represents 'pragma omp taskloop' directive.
This represents 'pragma omp taskloop simd' directive.
This represents 'pragma omp teams' directive.
This represents 'pragma omp teams distribute' directive.
This represents 'pragma omp teams distribute parallel for' composite directive.
This represents 'pragma omp teams distribute parallel for simd' composite directive.
This represents 'pragma omp teams distribute simd' combined directive.
This represents 'pragma omp teams loop' directive.
This represents the 'pragma omp tile' loop transformation directive.
This represents the 'pragma omp unroll' loop transformation directive.
This class represents a 'loop' construct. The 'loop' construct applies to a 'for' loop (or range-for ...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3730
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3269
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition Expr.h:6927
Represents an attribute applied to a statement.
Definition Stmt.h:2195
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition Expr.h:4456
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition Expr.h:4494
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition Expr.h:4491
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
BreakStmt - This represents a break.
Definition Stmt.h:3127
mlir::Value getPointer() const
Definition Address.h:96
static Address invalid()
Definition Address.h:74
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Value getBasePointer() const
Definition Address.h:101
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
AbstractCallee(const clang::FunctionDecl *fd)
const clang::ParmVarDecl * getParamDecl(unsigned I) const
CIRGenFPOptionsRAII(CIRGenFunction &cgf, FPOptions FPFeatures)
CXXDefaultInitExprScope(CIRGenFunction &cgf, const CXXDefaultInitExpr *e)
An object to manage conditionally-evaluated expressions.
ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
mlir::OpBuilder::InsertPoint getInsertPoint() const
Returns the insertion point which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forReference(mlir::TypedAttr c)
static ConstantEmission forValue(mlir::TypedAttr c)
LValue getReferenceLValue(CIRGenFunction &cgf, Expr *refExpr) const
DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
FieldConstructionScope(CIRGenFunction &cgf, Address thisAddr)
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue, LValue lvalue)
OpaqueValueMapping(CIRGenFunction &cgf, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?
OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
RunCleanupsScope(CIRGenFunction &cgf)
Enter a new cleanup scope.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void restore()
Can be used to restore the state early, before the dtor is run.
SourceLocRAIIObject(CIRGenFunction &cgf, mlir::Location value)
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
static bool hasScalarEvaluationKind(clang::QualType type)
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
void emitOpenACCRoutine(const OpenACCRoutineDecl &d)
void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &d, cir::GlobalOp gv, cir::GetGlobalOp gvAddr)
Add the initializer for 'd' to the global variable that has already been created for it.
mlir::LogicalResult emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &s)
void generateThunk(cir::FuncOp fn, const CIRGenFunctionInfo &fnInfo, GlobalDecl gd, const ThunkInfo &thunk, bool isUnprototyped)
Generate code for a thunk function.
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
clang::CurrentSourceLocExprScope::SourceLocExprScopeGuard SourceLocExprScopeGuard
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d, mlir::OpBuilder::InsertPoint ip={})
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, clang::QualType::DestructionKind dtorKind)
Enter a destroy cleanup for the given local variable.
ImplicitParamDecl * cxxabiThisDecl
CXXThisDecl - When generating code for a C++ member function, this will hold the implicit 'this' decl...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
bool curFuncIsThunk
In C++, whether we are code generating a thunk.
mlir::LogicalResult emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
mlir::LogicalResult emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &s)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
llvm::ScopedHashTable< const clang::Decl *, mlir::Value > SymTableTy
The symbol table maps a variable name to a value in the current scope.
void emitInvariantStart(CharUnits size, mlir::Value addr, mlir::Location loc)
Definition CIRGenCXX.cpp:33
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s)
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator, CallArgList &callArgs)
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
void emitOMPRequiresDecl(const OMPRequiresDecl &d)
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::LogicalResult emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s)
Address cxxDefaultInitExprThis
The value of 'this' to use when evaluating CXXDefaultInitExprs within this expression.
void emitStaticVarDecl(const VarDecl &d, cir::GlobalLinkageKind linkage)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &s)
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s)
void setBeforeOutermostConditional(mlir::Value value, Address addr)
mlir::LogicalResult emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s)
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::Value loadCXXThis()
Load the value for 'this'.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
llvm::DenseMap< const clang::Decl *, Address > DeclMapTy
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void addCatchHandlerAttr(const CXXCatchStmt *catchStmt, SmallVector< mlir::Attribute > &handlerAttrs)
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
const clang::Decl * curFuncDecl
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
LValue emitLValueForLambdaField(const FieldDecl *field)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOrTryCall=nullptr)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
bool isTrivialInitializer(const Expr *init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
void emitOpenACCDeclare(const OpenACCDeclareDecl &d)
Address getAddrOfLocalVar(const clang::VarDecl *vd)
Return the address of a local variable.
void emitAnyExprToExn(const Expr *e, Address addr)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
void emitMustTailThunk(GlobalDecl gd, mlir::Value adjustedThisPtr, cir::FuncOp callee)
Emit a musttail call for a thunk with a potentially different ABI.
Address getAsNaturalAddressOf(Address addr, QualType pointeeTy)
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s)
mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase, bool delegating)
Return the VTT parameter that should be passed to a base constructor/destructor with virtual bases.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s)
void initializeVTablePointers(mlir::Location loc, const clang::CXXRecordDecl *rd)
mlir::Type convertType(const TypeDecl *t)
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas.
void initializeVTablePointer(mlir::Location loc, const VPtr &vptr)
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
void emitOMPDeclareReduction(const OMPDeclareReductionDecl &d)
void emitAggregateStore(mlir::Value value, Address dest)
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
ConditionalEvaluation * outermostConditional
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &s)
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl, CXXCtorInitializer *baseInit)
RValue emitAtomicExpr(AtomicExpr *e)
void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *d, LValue lvalue, bool capturedByInit=false)
Emit an expression as an initializer for an object (variable, field, etc.) at the given location.
void emitCXXGuardedInit(const VarDecl &varDecl, cir::GlobalOp globalOp, bool performInit)
Emit a guarded initializer for a static local variable or a static data member of a class template in...
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s)
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
mlir::LogicalResult emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s)
VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass)
const TargetCIRGenInfo & getTargetHooks() const
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s)
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
RValue emitCoyieldExpr(const CoyieldExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s)
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e)
mlir::LogicalResult emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s)
CleanupKind getCleanupKind(QualType::DestructionKind kind)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s)
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
std::pair< mlir::Value, mlir::Type > emitAsmInputLValue(const TargetInfo::ConstraintInfo &info, LValue inputValue, QualType inputType, std::string &constraintString, SourceLocation loc)
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::LogicalResult emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s)
void emitOMPAllocateDecl(const OMPAllocateDecl &d)
mlir::LogicalResult emitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &s)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType)
ImplicitParamDecl * cxxStructorImplicitParamDecl
When generating code for a constructor or destructor, this will hold the implicit argument (e....
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
void emitOMPDeclareMapper(const OMPDeclareMapperDecl &d)
mlir::LogicalResult emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType)
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
void startThunk(cir::FuncOp fn, GlobalDecl gd, const CIRGenFunctionInfo &fnInfo, bool isUnprototyped)
Start generating a thunk function.
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s)
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s)
mlir::LogicalResult emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s)
mlir::LogicalResult emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
static int64_t getZExtIntValueFromConstOp(mlir::Value val)
Get zero-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s)
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &s)
mlir::LogicalResult emitOMPInterchangeDirective(const OMPInterchangeDirective &s)
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s)
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s)
LValue emitAggExprToLValue(const Expr *e)
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
clang::CurrentSourceLocExprScope curSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
mlir::Value loadCXXVTT()
Load the VTT parameter passed to base constructors/destructors that have virtual bases.
void emitVarDecl(const clang::VarDecl &d)
This method handles emission of any variable declaration inside a function, including static vars etc...
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value emitSVEPredicateCast(mlir::Value pred, unsigned minNumElts, mlir::Location loc)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
bool getAArch64SVEProcessedOperands(unsigned builtinID, const CallExpr *expr, SmallVectorImpl< mlir::Value > &ops, clang::SVETypeFlags typeFlags)
Address returnValue
The temporary alloca to hold the return value.
LValue makeAddrLValue(Address addr, QualType ty, LValueBaseInfo baseInfo)
void emitCallAndReturnForThunk(cir::FuncOp callee, const ThunkInfo *thunk, bool isUnprototyped)
Emit the call and return for a thunk function.
static int64_t getSExtIntValueFromConstOp(mlir::Value val)
Get sign-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::LogicalResult emitOMPTargetDataDirective(const OMPTargetDataDirective &s)
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &s)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr, const clang::CXXRecordDecl *vtableClass)
Return the Value of the vtable pointer member pointed to by thisAddr.
void emitArrayDestroy(mlir::Value begin, mlir::Value numElements, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Destroys all the elements of the given array, beginning from last to first.
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::LogicalResult emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s)
RValue emitAnyExprToTemp(const clang::Expr *e)
Similarly to emitAnyExpr(), however, the result will always be accessible even if no aggregate locati...
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s)
mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc, const clang::Stmt *thenS, const clang::Stmt *elseS)
void emitStoreOfComplex(mlir::Location loc, mlir::Value v, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
llvm::SmallPtrSet< const clang::CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
bool hasVolatileMember(QualType t)
returns true if aggregate type has a volatile member.
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s)
void emitCallArg(CallArgList &args, const clang::Expr *e, clang::QualType argType)
clang::FieldDecl * lambdaThisCaptureField
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s)
void emitConstructorBody(FunctionArgList &args)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &s)
LValue emitCallExprLValue(const clang::CallExpr *e)
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s)
bool haveInsertPoint() const
True if an insertion point is defined.
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::LogicalResult emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s)
void emitAutoVarInit(const AutoVarEmission &emission)
Emit the initializer for an allocated variable.
void emitInitializerForField(clang::FieldDecl *field, LValue lhs, clang::Expr *init)
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
std::optional< mlir::Value > emitAMDGPUBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an AMDGPU builtin function.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::LogicalResult emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void maybeEmitDeferredVarDeclInit(const VarDecl *vd)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_end)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
std::optional< mlir::Value > emitAArch64SMEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
void pushStackRestore(CleanupKind kind, Address spMem)
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s)
void emitAutoVarDecl(const clang::VarDecl &d)
Emit code and set up symbol table for a variable declaration with auto, register, or no storage class...
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s)
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s)
mlir::LogicalResult emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s)
CIRGenBuilderTy & getBuilder()
void emitVAStart(mlir::Value vaList)
Emits the start of a CIR variable-argument operation (cir.va_start)
bool didCallStackSave
Whether a cir.stacksave operation has been added.
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
mlir::Value emitOpenACCIntExpr(const Expr *intExpr)
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitDestroy(Address addr, QualType type, Destroyer *destroyer)
Immediately perform the destruction of the given object.
const CIRGenModule & getCIRGenModule() const
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
llvm::DenseMap< const VarDecl *, mlir::Value > nrvoFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
Destroyer * getDestroyer(clang::QualType::DestructionKind kind)
mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee, llvm::ArrayRef< mlir::Value > args={}, mlir::NamedAttrList attrs={})
void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty)
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
void emitAtomicInit(Expr *init, LValue dest)
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
mlir::LogicalResult emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s)
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
mlir::Value createOpenACCConstantInt(mlir::Location loc, unsigned width, int64_t value)
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
void emitOMPThreadPrivateDecl(const OMPThreadPrivateDecl &d)
void emitOMPGroupPrivateDecl(const OMPGroupPrivateDecl &d)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::LogicalResult emitOMPParallelForDirective(const OMPParallelForDirective &s)
RValue emitCoawaitExpr(const CoawaitExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
void emitCXXThrowExpr(const CXXThrowExpr *e)
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
mlir::LogicalResult emitOMPDistributeDirective(const OMPDistributeDirective &s)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &s)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &s)
llvm::SmallVector< VPtr, 4 > VPtrsVector
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
bool sawAsmBlock
Whether or not a Microsoft-style asm block has been processed within this function.
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s)
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
std::pair< mlir::Value, mlir::Type > emitAsmInput(const TargetInfo::ConstraintInfo &info, const Expr *inputExpr, std::string &constraintString)
EHScopeStack::stable_iterator currentCleanupStackDepth
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
void pushFullExprCleanup(CleanupKind kind, As... a)
Push a cleanup to be run at the end of the current full-expression.
void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param, clang::SourceLocation loc)
We are performing a delegate call; that is, the current function is delegating to another one.
mlir::LogicalResult emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
const CIRGenFunctionInfo * curFnInfo
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if LHS is marked _Nonnull...
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
std::optional< mlir::Value > emitAArch64SVEBuiltinExpr(unsigned builtinID, const CallExpr *expr)
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr)
void emitOMPCapturedExpr(const OMPCapturedExprDecl &d)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to have the same ...
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &s)
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s)
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates a alloca and inserts it into the entry block of the current region.
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &s)
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s)
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor, const FunctionArgList &args)
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, mlir::OpBuilder::InsertPoint ip, mlir::Value arraySize=nullptr)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s)
void finishThunk()
Finish generating a thunk function.
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
void emitAutoVarCleanups(const AutoVarEmission &emission)
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:48
A saved depth on the scope stack.
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:193
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a call to a CUDA kernel function.
Definition ExprCXX.h:235
CXXCatchStmt - This represents a C++ catch block.
Definition StmtCXX.h:28
Represents a call to a C++ constructor.
Definition ExprCXX.h:1549
Represents a C++ constructor within a class.
Definition DeclCXX.h:2611
Represents a C++ base or member initializer.
Definition DeclCXX.h:2376
A default argument (C++ [dcl.fct.default]).
Definition ExprCXX.h:1271
A use of a default initializer in a constructor or in aggregate initialization.
Definition ExprCXX.h:1378
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2627
Represents a C++ destructor within a class.
Definition DeclCXX.h:2876
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:482
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:180
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2356
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:85
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2746
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
Represents a C++ temporary.
Definition ExprCXX.h:1460
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1209
CXXTryStmt - A C++ try block, including all handlers.
Definition StmtCXX.h:69
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
CaseStmt - Represent a case statement.
Definition Stmt.h:1912
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4303
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3608
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1732
ContinueStmt - This represents a continue.
Definition Stmt.h:3111
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition StmtCXX.h:473
Represents the body of a coroutine.
Definition StmtCXX.h:320
SourceLocExprScopeGuard(const Expr *DefaultExpr, CurrentSourceLocExprScope &Current)
Represents the current source location and context used to determine the value of the source location...
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1623
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2824
This represents one expression.
Definition Expr.h:112
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6609
Represents a member of a struct/union/class.
Definition Decl.h:3160
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2880
Represents a function declaration or definition.
Definition Decl.h:2000
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5315
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2961
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3000
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3661
Represents a C++ nested name specifier, such as "::std::vector<int>::".
This represents 'pragma omp allocate ...' directive.
Definition DeclOpenMP.h:536
Pseudo declaration for capturing expressions.
Definition DeclOpenMP.h:445
This represents 'pragma omp declare mapper ...' directive.
Definition DeclOpenMP.h:349
This represents 'pragma omp declare reduction ...' directive.
Definition DeclOpenMP.h:239
This represents 'pragma omp groupprivate ...' directive.
Definition DeclOpenMP.h:173
This represents 'pragma omp requires...' directive.
Definition DeclOpenMP.h:479
This represents 'pragma omp threadprivate ...' directive.
Definition DeclOpenMP.h:110
ObjCMethodDecl - Represents an instance or class method declaration.
Definition DeclObjC.h:140
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1231
Represents a parameter to a function.
Definition Decl.h:1790
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2008
A (possibly-)qualified type.
Definition TypeBase.h:937
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8428
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
Flags to identify the types for overloaded SVE builtins.
Encodes a location in the source.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Exposes information about the current target.
Definition TargetInfo.h:226
Represents a declaration of a type.
Definition Decl.h:3513
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
Represents a variable declaration or definition.
Definition Decl.h:926
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3974
#define bool
Definition gpuintrin.h:32
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
const internal::VariadicDynCastAllOfMatcher< Decl, VarDecl > varDecl
Matches variable declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, CallExpr > callExpr
Matches call expressions.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicAllOfMatcher< Stmt > stmt
Matches statements.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
OpenACCDirectiveKind
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
OpenACCComputeConstruct(OpenACCDirectiveKind K, SourceLocation Start, SourceLocation DirectiveLoc, SourceLocation End, ArrayRef< const OpenACCClause * > Clauses, Stmt *StructuredBlock)
CXXDtorType
C++ destructor types.
Definition ABI.h:34
#define true
Definition stdbool.h:25
static bool aggValueSlot()
static bool peepholeProtection()
static bool opAllocaEscapeByReference()
static bool generateDebugInfo()
AutoVarEmission(const clang::VarDecl &variable)
bool isEscapingByRef
True if the variable is a __block variable that is captured by an escaping block.
Address addr
The address of the alloca for languages with explicit address space (e.g.
bool emittedAsOffload
True if the variable was emitted as an offload recipe, and thus doesn't have the same sort of alloca ...
bool isConstantAggregate
True if the variable is of aggregate type and has a constant initializer.
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
Address getObjectAddress(CIRGenFunction &cgf) const
Returns the address of the object within this declaration.
std::unique_ptr< CGCoroData > data
CXXDefaultArgExprScope(CIRGenFunction &cfg, const CXXDefaultArgExpr *e)
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
mlir::Block * createCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc)
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
void updateRetLoc(mlir::Block *b, mlir::Location loc)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
mlir::Block * getOrCreateCleanupBlock(mlir::OpBuilder &builder)
mlir::Location getRetLoc(mlir::Block *b)
llvm::PointerUnion< const clang::FunctionProtoType *, const clang::ObjCMethodDecl * > p
PrototypeWrapper(const clang::ObjCMethodDecl *md)
PrototypeWrapper(const clang::FunctionProtoType *ft)
const clang::CXXRecordDecl * vtableClass
const clang::CXXRecordDecl * nearestVBase
VlaSizePair(mlir::Value num, QualType ty)
The this pointer adjustment as well as an optional return adjustment for a thunk.
Definition Thunk.h:157