//===--- CIRGenCoroutine.cpp - Emit CIR Code for C++ coroutines ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of coroutines.
//
//===----------------------------------------------------------------------===//
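
// For orientation only (this example is not part of the original file): the
// CoroutineBodyStmt lowered below corresponds to user-written code along the
// lines of
//
//   task foo() {
//     int v = co_await some_awaitable();
//     co_return;
//   }
//
// where `task`, `some_awaitable`, and their promise type are hypothetical
// names. The compiler-synthesized pieces (promise declaration,
// get_return_object, initial/final suspend, parameter moves) are the
// statements emitted by emitCoroutineBody below.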

#include "CIRGenFunction.h"
#include "mlir/Support/LLVM.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CIR/MissingFeatures.h"

using namespace clang;
using namespace clang::CIRGen;

struct clang::CIRGen::CGCoroData {
  // What is the current await expression kind and how many
  // await/yield expressions were encountered so far.
  // These are used to generate pretty labels for await expressions in LLVM IR.
  cir::AwaitKind currentAwaitKind = cir::AwaitKind::Init;
  // Stores the __builtin_coro_id emitted in the function so that we can supply
  // it as the first argument to other builtins.
  cir::CallOp coroId = nullptr;

  // Stores the result of the __builtin_coro_begin call.
  mlir::Value coroBegin = nullptr;

  // Stores the insertion point for the final suspend; this happens after the
  // promise call (return_xxx promise member) but before a cir.br to the return
  // block.
  mlir::Operation *finalSuspendInsPoint;

  // How many co_return statements are in the coroutine. Used to decide whether
  // we need to add a co_return; equivalent at the end of the user-authored
  // body.
  unsigned coreturnCount = 0;

  // The promise type's 'unhandled_exception' handler, if it defines one.
  Stmt *exceptionHandler = nullptr;
};

// Defining these here allows keeping CGCoroData private to this file.
CIRGenFunction::CGCoroInfo::CGCoroInfo() {}
CIRGenFunction::CGCoroInfo::~CGCoroInfo() {}

namespace {
// FIXME: both GetParamRef and ParamReferenceReplacerRAII are good template
// candidates to be shared among LLVM / CIR codegen.

// Hunts for the parameter reference in the parameter copy/move declaration.
struct GetParamRef : public StmtVisitor<GetParamRef> {
public:
  DeclRefExpr *expr = nullptr;
  GetParamRef() {}
  void VisitDeclRefExpr(DeclRefExpr *e) {
    assert(expr == nullptr && "multiple declref in param move");
    expr = e;
  }
  void VisitStmt(Stmt *s) {
    for (Stmt *c : s->children()) {
      if (c)
        Visit(c);
    }
  }
};

// This class replaces references to parameters with references to their
// copies by changing the addresses in CGF.LocalDeclMap, and restores the
// original values in its destructor.
struct ParamReferenceReplacerRAII {
  CIRGenFunction::DeclMapTy savedLocals;
  CIRGenFunction::DeclMapTy &localDeclMap;

  ParamReferenceReplacerRAII(CIRGenFunction::DeclMapTy &localDeclMap)
      : localDeclMap(localDeclMap) {}

  void addCopy(const DeclStmt *pm) {
    // Figure out what param it refers to.

    assert(pm->isSingleDecl());
    const VarDecl *vd = static_cast<const VarDecl *>(pm->getSingleDecl());
    const Expr *initExpr = vd->getInit();
    GetParamRef visitor;
    visitor.Visit(const_cast<Expr *>(initExpr));
    assert(visitor.expr);
    DeclRefExpr *dreOrig = visitor.expr;
    auto *pd = dreOrig->getDecl();

    auto it = localDeclMap.find(pd);
    assert(it != localDeclMap.end() && "parameter is not found");
    savedLocals.insert({pd, it->second});

    auto copyIt = localDeclMap.find(vd);
    assert(copyIt != localDeclMap.end() && "parameter copy is not found");
    it->second = copyIt->getSecond();
  }

  ~ParamReferenceReplacerRAII() {
    for (auto &&savedLocal : savedLocals) {
      localDeclMap.insert({savedLocal.first, savedLocal.second});
    }
  }
};
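
// Illustrative sketch (hedged, not from this file): for a coroutine such as
// `task f(T p)`, each entry in CoroutineBodyStmt::getParamMoves() is a
// DeclStmt for a copy roughly of the form
//
//   T __p_copy = static_cast<T &&>(p);
//
// addCopy() locates the DeclRefExpr to `p` inside that initializer and
// rebinds the localDeclMap entry for `p` to the copy's address, so the rest
// of the coroutine body reads the copy; the destructor restores the original
// mapping. The names `task`, `T`, `p`, and `__p_copy` are hypothetical.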

} // namespace

RValue CIRGenFunction::emitCoroutineFrame() {
  if (curCoro.data && curCoro.data->coroBegin) {
    return RValue::get(curCoro.data->coroBegin);
  }
  cgm.errorNYI("NYI");
  return RValue();
}

static void createCoroData(CIRGenFunction &cgf,
                           CIRGenFunction::CGCoroInfo &curCoro,
                           cir::CallOp coroId) {
  assert(!curCoro.data && "EmitCoroutineBodyStatement called twice?");

  curCoro.data = std::make_unique<CGCoroData>();
  curCoro.data->coroId = coroId;
}

static mlir::LogicalResult
emitBodyAndFallthrough(CIRGenFunction &cgf, const CoroutineBodyStmt &s,
                       Stmt *body,
                       const CIRGenFunction::LexicalScope *currLexScope) {
  if (cgf.emitStmt(body, /*useCurrentScope=*/true).failed())
    return mlir::failure();
  // Note that classic codegen checks CanFallthrough by looking at the
  // availability of the insert block, which is somewhat brittle and
  // unintuitive and seems to be related to how landing pads are handled.
  //
  // CIRGen handles this by checking for pre-existing co_returns in the
  // current scope instead.

  // From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock();
  const bool canFallthrough = !currLexScope->hasCoreturn();
  if (canFallthrough)
    if (Stmt *onFallthrough = s.getFallthroughHandler())
      if (cgf.emitStmt(onFallthrough, /*useCurrentScope=*/true).failed())
        return mlir::failure();

  return mlir::success();
}

cir::CallOp CIRGenFunction::emitCoroIDBuiltinCall(mlir::Location loc,
                                                  mlir::Value nullPtr) {
  cir::IntType int32Ty = builder.getUInt32Ty();

  const TargetInfo &ti = cgm.getASTContext().getTargetInfo();
  unsigned newAlign = ti.getNewAlign() / ti.getCharWidth();

  mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroId);

  cir::FuncOp fnOp;
  if (!builtin) {
    fnOp = cgm.createCIRBuiltinFunction(
        loc, cgm.builtinCoroId,
        cir::FuncType::get({int32Ty, voidPtrTy, voidPtrTy, voidPtrTy}, int32Ty),
        /*fd=*/nullptr);
    assert(fnOp && "should always succeed");
  } else {
    fnOp = cast<cir::FuncOp>(builtin);
  }

  return builder.createCallOp(loc, fnOp,
                              mlir::ValueRange{builder.getUInt32(newAlign, loc),
                                               nullPtr, nullPtr, nullPtr});
}
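
// Note (added for context, hedged): when this CIR is later lowered to LLVM
// IR, the builtin call above is expected to become the llvm.coro.id
// intrinsic, roughly:
//
//   %id = call token @llvm.coro.id(i32 <new-align>, ptr null, ptr null,
//                                  ptr null)
//
// The exact lowering lives elsewhere (the LLVM lowering and coroutine
// passes); this is only meant as a reading aid.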

cir::CallOp CIRGenFunction::emitCoroAllocBuiltinCall(mlir::Location loc) {
  cir::BoolType boolTy = builder.getBoolTy();

  mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroAlloc);

  cir::FuncOp fnOp;
  if (!builtin) {
    fnOp = cgm.createCIRBuiltinFunction(loc, cgm.builtinCoroAlloc,
                                        cir::FuncType::get({uInt32Ty}, boolTy),
                                        /*fd=*/nullptr);
    assert(fnOp && "should always succeed");
  } else {
    fnOp = cast<cir::FuncOp>(builtin);
  }

  return builder.createCallOp(
      loc, fnOp, mlir::ValueRange{curCoro.data->coroId.getResult()});
}

cir::CallOp
CIRGenFunction::emitCoroBeginBuiltinCall(mlir::Location loc,
                                         mlir::Value coroframeAddr) {
  mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroBegin);

  cir::FuncOp fnOp;
  if (!builtin) {
    fnOp = cgm.createCIRBuiltinFunction(
        loc, cgm.builtinCoroBegin,
        cir::FuncType::get({uInt32Ty, voidPtrTy}, voidPtrTy),
        /*fd=*/nullptr);
    assert(fnOp && "should always succeed");
  } else {
    fnOp = cast<cir::FuncOp>(builtin);
  }

  return builder.createCallOp(
      loc, fnOp,
      mlir::ValueRange{curCoro.data->coroId.getResult(), coroframeAddr});
}

cir::CallOp CIRGenFunction::emitCoroEndBuiltinCall(mlir::Location loc,
                                                   mlir::Value nullPtr) {
  cir::BoolType boolTy = builder.getBoolTy();
  mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroEnd);

  cir::FuncOp fnOp;
  if (!builtin) {
    fnOp = cgm.createCIRBuiltinFunction(
        loc, cgm.builtinCoroEnd,
        cir::FuncType::get({voidPtrTy, boolTy}, boolTy),
        /*fd=*/nullptr);
    assert(fnOp && "should always succeed");
  } else {
    fnOp = cast<cir::FuncOp>(builtin);
  }

  return builder.createCallOp(
      loc, fnOp, mlir::ValueRange{nullPtr, builder.getBool(false, loc)});
}

mlir::LogicalResult
CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &s) {
  mlir::Location openCurlyLoc = getLoc(s.getBeginLoc());
  cir::ConstantOp nullPtrCst = builder.getNullPtr(voidPtrTy, openCurlyLoc);

  auto fn = mlir::cast<cir::FuncOp>(curFn);
  fn.setCoroutine(true);
  cir::CallOp coroId = emitCoroIDBuiltinCall(openCurlyLoc, nullPtrCst);
  createCoroData(*this, curCoro, coroId);

  // The backend is allowed to elide memory allocations; to help it, emit
  //   auto mem = coro.alloc() ? 0 : ... allocation code ...;
  cir::CallOp coroAlloc = emitCoroAllocBuiltinCall(openCurlyLoc);

  // Initialize the address of the coroutine frame to null.
  CanQualType astVoidPtrTy = cgm.getASTContext().VoidPtrTy;
  mlir::Type allocaTy = convertTypeForMem(astVoidPtrTy);
  Address coroFrame =
      createTempAlloca(allocaTy, getContext().getTypeAlignInChars(astVoidPtrTy),
                       openCurlyLoc, "__coro_frame_addr",
                       /*ArraySize=*/nullptr);

  mlir::Value storeAddr = coroFrame.getPointer();
  builder.CIRBaseBuilderTy::createStore(openCurlyLoc, nullPtrCst, storeAddr);
  cir::IfOp::create(
      builder, openCurlyLoc, coroAlloc.getResult(),
      /*withElseRegion=*/false,
      /*thenBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) {
        builder.CIRBaseBuilderTy::createStore(
            loc, emitScalarExpr(s.getAllocate()), storeAddr);
        cir::YieldOp::create(builder, loc);
      });
  curCoro.data->coroBegin =
      emitCoroBeginBuiltinCall(
          openCurlyLoc,
          cir::LoadOp::create(builder, openCurlyLoc, allocaTy, storeAddr))
          .getResult();

  // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
  if (s.getReturnStmtOnAllocFailure())
    cgm.errorNYI("handle coroutine return alloc failure");

  {
    assert(!cir::MissingFeatures::ehCleanupScope());
    ParamReferenceReplacerRAII paramReplacer(localDeclMap);
    // Create a mapping between parameters and copy-params for the coroutine
    // function.
    llvm::ArrayRef<const Stmt *> paramMoves = s.getParamMoves();
    assert((paramMoves.size() == 0 || (paramMoves.size() == fnArgs.size())) &&
           "ParamMoves and FnArgs should be the same size for coroutine "
           "function");
    // For zipping the arg map into debug info.
    assert(!cir::MissingFeatures::generateDebugInfo());

    // Create parameter copies. We do it before creating a promise, since an
    // evolution of coroutine TS may allow promise constructor to observe
    // parameter copies.
    for (auto *pm : paramMoves) {
      if (emitStmt(pm, /*useCurrentScope=*/true).failed())
        return mlir::failure();
      paramReplacer.addCopy(cast<DeclStmt>(pm));
    }

    if (emitStmt(s.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();
    // returnValue should be valid as long as the coroutine's return type
    // is not void. The assertion could help us to reduce the check later.
    assert(returnValue.isValid() == (bool)s.getReturnStmt());
    // Now that we have the promise, initialize the GRO.
    // We need to emit `get_return_object` first. According to
    // [dcl.fct.def.coroutine]p7:
    //   The call to get_return_object is sequenced before the call to
    //   initial_suspend and is invoked at most once.
    //
    // So we can't emit the return value when we emit the return statement;
    // otherwise the call to get_return_object wouldn't come before
    // initial_suspend.
    if (returnValue.isValid())
      emitAnyExprToMem(s.getReturnValue(), returnValue,
                       s.getReturnValue()->getType().getQualifiers(),
                       /*isInit*/ true);

    curCoro.data->currentAwaitKind = cir::AwaitKind::Init;
    if (emitStmt(s.getInitSuspendStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();

    curCoro.data->currentAwaitKind = cir::AwaitKind::User;

    // FIXME(cir): wrap emitBodyAndFallthrough with try/catch bits.
    if (s.getExceptionHandler())
      assert(!cir::MissingFeatures::coroutineExceptions());
    if (emitBodyAndFallthrough(*this, s, s.getBody(), curLexScope).failed())
      return mlir::failure();

    // Note that LLVM checks CanFallthrough by looking at the availability of
    // the insert block, which is somewhat brittle and unintuitive and seems
    // to be related to how landing pads are handled.
    //
    // CIRGen handles this by checking for pre-existing co_returns in the
    // current scope instead.
    //
    // From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock();
    const bool canFallthrough = curLexScope->hasCoreturn();
    const bool hasCoreturns = curCoro.data->coreturnCount > 0;
    if (canFallthrough || hasCoreturns) {
      curCoro.data->currentAwaitKind = cir::AwaitKind::Final;
      {
        mlir::OpBuilder::InsertionGuard guard(builder);
        builder.setInsertionPoint(curCoro.data->finalSuspendInsPoint);
        if (emitStmt(s.getFinalSuspendStmt(), /*useCurrentScope=*/true)
                .failed())
          return mlir::failure();
      }
    }
  }
  return mlir::success();
}

static bool memberCallExpressionCanThrow(const Expr *e) {
  if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
    if (const auto *proto =
            ce->getMethodDecl()->getType()->getAs<FunctionProtoType>())
      if (isNoexceptExceptionSpec(proto->getExceptionSpecType()) &&
          proto->canThrow() == CT_Cannot)
        return false;
  return true;
}
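
// For example (illustrative, hedged): given an awaiter whose resume hook is
// declared as
//
//   int await_resume() noexcept;
//
// the member call to await_resume() has a noexcept prototype that cannot
// throw, so memberCallExpressionCanThrow() returns false and the extra
// exception-handling IR around the resume can be skipped.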

// Given a suspend expression which roughly looks like:
//
//   auto && x = CommonExpr();
//   if (!x.await_ready()) {
//      x.await_suspend(...); (*)
//   }
//   x.await_resume();
//
// where the result of the entire expression is the result of x.await_resume()
//
// (*) If x.await_suspend's return type is bool, it allows vetoing the suspend:
//
//   if (x.await_suspend(...))
//     llvm_coro_suspend();
//
// This is higher level than LLVM codegen; for that, see llvm's
// docs/Coroutines.rst for more details.
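//
// As an illustrative, hedged example (not from this file), an awaiter whose
// await_suspend can veto suspension might be written as:
//
//   struct maybe_suspend {
//     bool await_ready() const noexcept { return false; }
//     bool await_suspend(std::coroutine_handle<>) noexcept {
//       // should_suspend() is a hypothetical helper; returning false here
//       // means "do not suspend, resume immediately" (the veto case above).
//       return should_suspend();
//     }
//     int await_resume() noexcept { return 42; }
//   };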
namespace {
struct LValueOrRValue {
  LValue lv;
  RValue rv;
};
} // namespace

static LValueOrRValue
emitSuspendExpression(CIRGenFunction &cgf, CGCoroData &coro,
                      CoroutineSuspendExpr const &s, cir::AwaitKind kind,
                      AggValueSlot aggSlot, bool ignoreResult,
                      mlir::Block *scopeParentBlock,
                      mlir::Value &tmpResumeRValAddr, bool forLValue) {
  [[maybe_unused]] mlir::LogicalResult awaitBuild = mlir::success();
  LValueOrRValue awaitRes;

  auto binder =
      CIRGenFunction::OpaqueValueMapping(cgf, s.getOpaqueValue());
  CIRGenBuilderTy &builder = cgf.getBuilder();
  [[maybe_unused]] cir::AwaitOp awaitOp = cir::AwaitOp::create(
      builder, cgf.getLoc(s.getSourceRange()), kind,
      /*readyBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location loc) {
        Expr *condExpr = s.getReadyExpr()->IgnoreParens();
        builder.createCondition(cgf.evaluateExprAsBool(condExpr));
      },
      /*suspendBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location loc) {
        // Note that, unlike LLVM codegen, we do not emit coro.save and
        // coro.suspend here; that should be done as part of lowering this to
        // the LLVM dialect (or some other MLIR dialect).

        // An invalid suspendRet indicates a "void returning await_suspend".
        mlir::Value suspendRet = cgf.emitScalarExpr(s.getSuspendExpr());

        // Veto suspension if requested by bool returning await_suspend.
        if (suspendRet) {
          cgf.cgm.errorNYI("Veto await_suspend");
        }

        // Signals the parent that execution flows to next region.
        cir::YieldOp::create(builder, loc);
      },
      /*resumeBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location loc) {
        // Exception handling requires additional IR. If the 'await_resume'
        // function is marked as 'noexcept', we avoid generating this
        // additional IR.
        CXXTryStmt *tryStmt = nullptr;
        if (coro.exceptionHandler && kind == cir::AwaitKind::Init &&
            memberCallExpressionCanThrow(s.getResumeExpr()))
          cgf.cgm.errorNYI("Coro resume Exception");

        // FIXME(cir): the alloca for the resume expr should be placed in the
        // enclosing cir.scope instead.
        if (forLValue) {
          awaitRes.lv = cgf.emitLValue(s.getResumeExpr());
        } else {
          awaitRes.rv =
              cgf.emitAnyExpr(s.getResumeExpr(), aggSlot, ignoreResult);
          if (!awaitRes.rv.isIgnored()) {
            // Create the alloca in the block before the scope wrapping
            // cir.await.
            tmpResumeRValAddr = cgf.emitAlloca(
                "__coawait_resume_rval", awaitRes.rv.getValue().getType(), loc,
                clang::CharUnits::One(),
                builder.getBestAllocaInsertPoint(scopeParentBlock));
            // Store the rvalue so we can reload it before the promise call.
            builder.CIRBaseBuilderTy::createStore(loc, awaitRes.rv.getValue(),
                                                  tmpResumeRValAddr);
          }
        }

        if (tryStmt)
          cgf.cgm.errorNYI("Coro tryStmt");

        // Returns control back to parent.
        cir::YieldOp::create(builder, loc);
      });

  assert(awaitBuild.succeeded() && "Should know how to codegen");
  return awaitRes;
}
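
// Rough shape of what the code above builds (a hedged sketch, not the exact
// CIR assembly syntax): a cir.await operation with three regions, e.g.
//
//   cir.await(user,
//     ready : {  %r = ... await_ready ...   cir.condition(%r) },
//     suspend : { ... await_suspend ...     cir.yield },
//     resume : {  ... await_resume ...      cir.yield })
//
// coro.save/coro.suspend only show up later, when cir.await is lowered.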

static RValue emitSuspendExpr(CIRGenFunction &cgf,
                              const CoroutineSuspendExpr &e,
                              cir::AwaitKind kind, AggValueSlot aggSlot,
                              bool ignoreResult) {
  RValue rval;
  mlir::Location scopeLoc = cgf.getLoc(e.getSourceRange());

  // Since we model suspend / resume as an inner region, we must store
  // resume scalar results in a tmp alloca, and load it after we build the
  // suspend expression. An alternative way to do this would be to make
  // every region return a value when promise.return_value() is used, but
  // it's a bit awkward given that resume is the only region that actually
  // returns a value.
  mlir::Block *currEntryBlock = cgf.curLexScope->getEntryBlock();
  [[maybe_unused]] mlir::Value tmpResumeRValAddr;

  // No need to explicitly wrap this into a scope since the AST already uses
  // an ExprWithCleanups, which will wrap this into a cir.scope anyway.
  rval = emitSuspendExpression(cgf, *cgf.curCoro.data, e, kind, aggSlot,
                               ignoreResult, currEntryBlock, tmpResumeRValAddr,
                               /*forLValue*/ false)
             .rv;

  if (ignoreResult || rval.isIgnored())
    return rval;

  if (rval.isScalar()) {
    rval = RValue::get(cir::LoadOp::create(cgf.getBuilder(), scopeLoc,
                                           rval.getValue().getType(),
                                           tmpResumeRValAddr));
  } else if (rval.isAggregate()) {
    // This is probably already handled via aggSlot; remove this assertion
    // once we have a testcase and prove all pieces work.
    cgf.cgm.errorNYI("emitSuspendExpr Aggregate");
  } else { // complex
    cgf.cgm.errorNYI("emitSuspendExpr Complex");
  }
  return rval;
}

RValue CIRGenFunction::emitCoawaitExpr(const CoawaitExpr &e,
                                       AggValueSlot aggSlot,
                                       bool ignoreResult) {
  return emitSuspendExpr(*this, e, curCoro.data->currentAwaitKind, aggSlot,
                         ignoreResult);
}

RValue CIRGenFunction::emitCoyieldExpr(const CoyieldExpr &e,
                                       AggValueSlot aggSlot,
                                       bool ignoreResult) {
  return emitSuspendExpr(*this, e, cir::AwaitKind::Yield, aggSlot,
                         ignoreResult);
}
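
// Reading aid (hedged, not from this file): for a co_return statement such as
//
//   co_return expr;
//
// Sema has already synthesized the promise call returned by
// s.getPromiseCall(), i.e. roughly `__promise.return_value(expr)` (or
// `__promise.return_void()` for a plain `co_return;`), so emitCoreturnStmt
// below only has to evaluate any side effects, emit that call, and branch
// toward the final suspend / return block.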

mlir::LogicalResult CIRGenFunction::emitCoreturnStmt(const CoreturnStmt &s) {
  ++curCoro.data->coreturnCount;
  curLexScope->setCoreturn();

  const Expr *rv = s.getOperand();
  if (rv && rv->getType()->isVoidType() && !isa<InitListExpr>(rv)) {
    // Make sure to evaluate the non-initlist expression of a co_return
    // with a void expression for side effects.
    RunCleanupsScope cleanupScope(*this);
    emitIgnoredExpr(rv);
  }

  if (emitStmt(s.getPromiseCall(), /*useCurrentScope=*/true).failed())
    return mlir::failure();
  // Create a new return block (if it does not exist yet) and add a branch to
  // it. The actual return instruction is only inserted during current
  // scope cleanup handling.
  mlir::Location loc = getLoc(s.getSourceRange());
  mlir::Block *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
  curCoro.data->finalSuspendInsPoint =
      cir::BrOp::create(builder, loc, retBlock);

  // Insert the new block to continue codegen after the branch to the ret
  // block; this will likely be an empty block.
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}