clang 23.0.0git
CIRGenCoroutine.cpp
Go to the documentation of this file.
1//===----- CGCoroutine.cpp - Emit CIR Code for C++ coroutines -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code dealing with C++ code generation of coroutines.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14#include "mlir/Support/LLVM.h"
15#include "clang/AST/StmtCXX.h"
20
21using namespace clang;
22using namespace clang::CIRGen;
23
25 // What is the current await expression kind and how many
26 // await/yield expressions were encountered so far.
27 // These are used to generate pretty labels for await expressions in LLVM IR.
28 cir::AwaitKind currentAwaitKind = cir::AwaitKind::Init;
29 // Stores the __builtin_coro_id emitted in the function so that we can supply
30 // it as the first argument to other builtins.
31 cir::CallOp coroId = nullptr;
32
33 // Stores the result of __builtin_coro_begin call.
34 mlir::Value coroBegin = nullptr;
35
36 // How many co_return statements are in the coroutine. Used to decide whether
37 // we need to add co_return; equivalent at the end of the user authored body.
38 unsigned coreturnCount = 0;
39
40 // The promise type's 'unhandled_exception' handler, if it defines one.
42
43 // Stores the last emitted coro.free for the deallocate expressions, we use it
44 // to wrap dealloc code with if(auto mem = coro.free) dealloc(mem).
45 cir::CallOp lastCoroFree = nullptr;
46};
47
48// Defining these here allows to keep CGCoroData private to this file.
51
52namespace {
53// FIXME: both GetParamRef and ParamReferenceReplacerRAII are good template
54// candidates to be shared among LLVM / CIR codegen.
55
56// Hunts for the parameter reference in the parameter copy/move declaration.
57struct GetParamRef : public StmtVisitor<GetParamRef> {
58public:
59 DeclRefExpr *expr = nullptr;
60 GetParamRef() {}
61 void VisitDeclRefExpr(DeclRefExpr *e) {
62 assert(expr == nullptr && "multilple declref in param move");
63 expr = e;
64 }
65 void VisitStmt(Stmt *s) {
66 for (Stmt *c : s->children()) {
67 if (c)
68 Visit(c);
69 }
70 }
71};
72
73// This class replaces references to parameters to their copies by changing
74// the addresses in CGF.LocalDeclMap and restoring back the original values in
75// its destructor.
struct ParamReferenceReplacerRAII {
  // Original (declaration -> address) entries displaced by addCopy(); used by
  // the destructor to put the map back.
  CIRGenFunction::DeclMapTy savedLocals;
  CIRGenFunction::DeclMapTy &localDeclMap;

  ParamReferenceReplacerRAII(CIRGenFunction::DeclMapTy &localDeclMap)
      : localDeclMap(localDeclMap) {}

  // Given the DeclStmt 'pm' declaring a parameter copy, find the original
  // parameter referenced by its initializer and redirect that parameter's
  // entry in localDeclMap to the copy's address, saving the old mapping.
  void addCopy(const DeclStmt *pm) {
    // Figure out what param it refers to.

    assert(pm->isSingleDecl());
    const VarDecl *vd = static_cast<const VarDecl *>(pm->getSingleDecl());
    const Expr *initExpr = vd->getInit();
    // Walk the initializer to find the single DeclRefExpr naming the original
    // parameter.
    GetParamRef visitor;
    visitor.Visit(const_cast<Expr *>(initExpr));
    assert(visitor.expr);
    DeclRefExpr *dreOrig = visitor.expr;
    auto *pd = dreOrig->getDecl();

    auto it = localDeclMap.find(pd);
    assert(it != localDeclMap.end() && "parameter is not found");
    savedLocals.insert({pd, it->second});

    // Point the original parameter's entry at the copy's address so later
    // references to the parameter resolve to the copy.
    auto copyIt = localDeclMap.find(vd);
    assert(copyIt != localDeclMap.end() && "parameter copy is not found");
    it->second = copyIt->getSecond();
  }

  ~ParamReferenceReplacerRAII() {
    // NOTE(review): DenseMap-style insert() does not overwrite an existing
    // key, and the keys saved here are still present in localDeclMap (addCopy
    // only mutated their mapped values) — so this loop may not actually
    // restore the original addresses. Confirm intended semantics.
    for (auto &&savedLocal : savedLocals) {
      localDeclMap.insert({savedLocal.first, savedLocal.second});
    }
  }
};
110} // namespace
111
112namespace {
113// Make sure to call coro.delete on scope exit.
struct CallCoroDelete final : public EHScopeStack::Cleanup {
  // The coroutine's deallocate statement, supplied at construction and
  // emitted (guarded) by emit() below.
  Stmt *deallocate;

  // Emit "if (coro.free(CoroId, CoroBegin)) Deallocate;"

  // Note: That deallocation will be emitted twice: once for a normal exit and
  // once for exceptional exit. This usage is safe because Deallocate does not
  // contain any declarations. The SubStmtBuilder::makeNewAndDeleteExpr()
  // builds a single call to a deallocation function which is safe to emit
  // multiple times.
  void emit(CIRGenFunction &cgf, Flags) override {
    // Remember the current point, as we are going to emit deallocation code
    // first to get to coro.free instruction that is an argument to a delete
    // call.

    if (cgf.emitStmt(deallocate, /*useCurrentScope=*/true).failed()) {
      cgf.cgm.error(deallocate->getBeginLoc(),
                    "failed to emit coroutine deallocation expression");
      return;
    }

    CIRGenBuilderTy &builder = cgf.getBuilder();
    // lastCoroFree was recorded while the deallocate statement above was
    // emitted (see emitCoroFreeBuiltin).
    cir::CallOp coroFree = cgf.curCoro.data->lastCoroFree;

    if (!coroFree) {
      cgf.cgm.error(deallocate->getBeginLoc(),
                    "Deallocation expression does not refer to coro.free");
      return;
    }

    // Test coro.free's result against null right after the call; deallocation
    // must only run when a non-null frame pointer was returned.
    builder.setInsertionPointAfter(coroFree);
    mlir::Value isPtrNotNull = builder.createPtrIsNotNull(coroFree.getResult());

    // Collect every op emitted after the null check — i.e. the deallocation
    // code itself — so it can be relocated under the guard below.
    llvm::SmallVector<mlir::Operation *> opsToMove;
    mlir::Block *block = builder.getInsertionBlock();
    mlir::Block::iterator it(isPtrNotNull.getDefiningOp());

    for (++it; it != block->end(); ++it)
      opsToMove.push_back(&*it);

    auto ifOp =
        cir::IfOp::create(builder, cgf.getLoc(deallocate->getSourceRange()),
                          isPtrNotNull, /*withElseRegion*/ false,
                          [&](mlir::OpBuilder &builder, mlir::Location loc) {
                            cir::YieldOp::create(builder, loc);
                          });

    // Move the collected deallocation ops in front of the 'then' region's
    // terminator, placing them under the null-pointer guard.
    mlir::Operation *yieldOp = ifOp.getThenRegion().back().getTerminator();
    for (auto *op : opsToMove)
      op->moveBefore(yieldOp);
  }
  explicit CallCoroDelete(Stmt *deallocStmt) : deallocate(deallocStmt) {}
};
167} // namespace
168
170 if (curCoro.data && curCoro.data->coroBegin) {
171 return RValue::get(curCoro.data->coroBegin);
172 }
173 cgm.errorNYI("NYI");
174 return RValue();
175}
176
179 cir::CallOp coroId) {
180 assert(!curCoro.data && "EmitCoroutineBodyStatement called twice?");
181
182 curCoro.data = std::make_unique<CGCoroData>();
183 curCoro.data->coroId = coroId;
184}
185
186static mlir::LogicalResult
188 Stmt *body,
189 const CIRGenFunction::LexicalScope *currLexScope) {
190 if (cgf.emitStmt(body, /*useCurrentScope=*/true).failed())
191 return mlir::failure();
192 // Note that classic codegen checks CanFallthrough by looking into the
193 // availability of the insert block which is kinda brittle and unintuitive,
194 // seems to be related with how landing pads are handled.
195 //
196 // CIRGen handles this by checking pre-existing co_returns in the current
197 // scope instead.
198
199 // From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock();
200 const bool canFallthrough = !currLexScope->hasCoreturn();
201 if (canFallthrough)
202 if (Stmt *onFallthrough = s.getFallthroughHandler())
203 if (cgf.emitStmt(onFallthrough, /*useCurrentScope=*/true).failed())
204 return mlir::failure();
205
206 return mlir::success();
207}
208
209cir::CallOp CIRGenFunction::emitCoroIDBuiltinCall(mlir::Location loc,
210 mlir::Value nullPtr) {
211 cir::IntType int32Ty = builder.getUInt32Ty();
212
213 const TargetInfo &ti = cgm.getASTContext().getTargetInfo();
214 unsigned newAlign = ti.getNewAlign() / ti.getCharWidth();
215
216 mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroId);
217
218 cir::FuncOp fnOp;
219 if (!builtin) {
220 fnOp = cgm.createCIRBuiltinFunction(
221 loc, cgm.builtinCoroId,
222 cir::FuncType::get({int32Ty, voidPtrTy, voidPtrTy, voidPtrTy}, int32Ty),
223 /*FD=*/nullptr);
224 assert(fnOp && "should always succeed");
225 } else {
226 fnOp = cast<cir::FuncOp>(builtin);
227 }
228
229 return builder.createCallOp(loc, fnOp,
230 mlir::ValueRange{builder.getUInt32(newAlign, loc),
231 nullPtr, nullPtr, nullPtr});
232}
233
234cir::CallOp CIRGenFunction::emitCoroAllocBuiltinCall(mlir::Location loc) {
235 cir::BoolType boolTy = builder.getBoolTy();
236
237 mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroAlloc);
238
239 cir::FuncOp fnOp;
240 if (!builtin) {
241 fnOp = cgm.createCIRBuiltinFunction(loc, cgm.builtinCoroAlloc,
242 cir::FuncType::get({uInt32Ty}, boolTy),
243 /*fd=*/nullptr);
244 assert(fnOp && "should always succeed");
245 } else {
246 fnOp = cast<cir::FuncOp>(builtin);
247 }
248
249 return builder.createCallOp(
250 loc, fnOp, mlir::ValueRange{curCoro.data->coroId.getResult()});
251}
252
253cir::CallOp
255 mlir::Value coroframeAddr) {
256 mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroBegin);
257
258 cir::FuncOp fnOp;
259 if (!builtin) {
260 fnOp = cgm.createCIRBuiltinFunction(
261 loc, cgm.builtinCoroBegin,
262 cir::FuncType::get({uInt32Ty, voidPtrTy}, voidPtrTy),
263 /*fd=*/nullptr);
264 assert(fnOp && "should always succeed");
265 } else {
266 fnOp = cast<cir::FuncOp>(builtin);
267 }
268
269 return builder.createCallOp(
270 loc, fnOp,
271 mlir::ValueRange{curCoro.data->coroId.getResult(), coroframeAddr});
272}
273
274cir::CallOp CIRGenFunction::emitCoroEndBuiltinCall(mlir::Location loc,
275 mlir::Value nullPtr) {
276 cir::BoolType boolTy = builder.getBoolTy();
277 mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroEnd);
278
279 cir::FuncOp fnOp;
280 if (!builtin) {
281 fnOp = cgm.createCIRBuiltinFunction(
282 loc, cgm.builtinCoroEnd,
283 cir::FuncType::get({voidPtrTy, boolTy}, boolTy),
284 /*fd=*/nullptr);
285 assert(fnOp && "should always succeed");
286 } else {
287 fnOp = cast<cir::FuncOp>(builtin);
288 }
289
290 return builder.createCallOp(
291 loc, fnOp, mlir::ValueRange{nullPtr, builder.getBool(false, loc)});
292}
293
295 mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroFree);
296 mlir::Location loc = getLoc(e->getBeginLoc());
297 cir::FuncOp fnOp;
298 if (!builtin) {
299 fnOp = cgm.createCIRBuiltinFunction(
300 loc, cgm.builtinCoroFree,
301 cir::FuncType::get({uInt32Ty, voidPtrTy}, voidPtrTy),
302 /*fd=*/nullptr);
303 assert(fnOp && "should always succeed");
304 } else {
305 fnOp = cast<cir::FuncOp>(builtin);
306 }
307 cir::CallOp coroFree =
308 builder.createCallOp(loc, fnOp,
309 mlir::ValueRange{curCoro.data->coroId.getResult(),
310 curCoro.data->coroBegin});
311
312 curCoro.data->lastCoroFree = coroFree;
313 return coroFree;
314}
315
316mlir::LogicalResult
318 mlir::Location openCurlyLoc = getLoc(s.getBeginLoc());
319 cir::ConstantOp nullPtrCst = builder.getNullPtr(voidPtrTy, openCurlyLoc);
320
321 auto fn = mlir::cast<cir::FuncOp>(curFn);
322 fn.setCoroutine(true);
323 cir::CallOp coroId = emitCoroIDBuiltinCall(openCurlyLoc, nullPtrCst);
324 createCoroData(*this, curCoro, coroId);
325
326 // Backend is allowed to elide memory allocations, to help it, emit
327 // auto mem = coro.alloc() ? 0 : ... allocation code ...;
328 cir::CallOp coroAlloc = emitCoroAllocBuiltinCall(openCurlyLoc);
329
330 // Initialize address of coroutine frame to null
331 CanQualType astVoidPtrTy = cgm.getASTContext().VoidPtrTy;
332 mlir::Type allocaTy = convertTypeForMem(astVoidPtrTy);
333 Address coroFrame =
334 createTempAlloca(allocaTy, getContext().getTypeAlignInChars(astVoidPtrTy),
335 openCurlyLoc, "__coro_frame_addr",
336 /*ArraySize=*/nullptr);
337
338 mlir::Value storeAddr = coroFrame.getPointer();
339 builder.CIRBaseBuilderTy::createStore(openCurlyLoc, nullPtrCst, storeAddr);
340 cir::IfOp::create(
341 builder, openCurlyLoc, coroAlloc.getResult(),
342 /*withElseRegion=*/false,
343 /*thenBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) {
344 builder.CIRBaseBuilderTy::createStore(
345 loc, emitScalarExpr(s.getAllocate()), storeAddr);
346 cir::YieldOp::create(builder, loc);
347 });
348 curCoro.data->coroBegin =
350 openCurlyLoc,
351 cir::LoadOp::create(builder, openCurlyLoc, allocaTy, storeAddr))
352 .getResult();
353
354 // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
355 if (s.getReturnStmtOnAllocFailure())
356 cgm.errorNYI("handle coroutine return alloc failure");
357
358 {
360 ParamReferenceReplacerRAII paramReplacer(localDeclMap);
361 RunCleanupsScope resumeScope(*this);
362 ehStack.pushCleanup<CallCoroDelete>(NormalAndEHCleanup, s.getDeallocate());
363 // Create mapping between parameters and copy-params for coroutine
364 // function.
365 llvm::ArrayRef<const Stmt *> paramMoves = s.getParamMoves();
366 assert((paramMoves.size() == 0 || (paramMoves.size() == fnArgs.size())) &&
367 "ParamMoves and FnArgs should be the same size for coroutine "
368 "function");
369 // For zipping the arg map into debug info.
371
372 // Create parameter copies. We do it before creating a promise, since an
373 // evolution of coroutine TS may allow promise constructor to observe
374 // parameter copies.
376 for (auto *pm : paramMoves) {
377 if (emitStmt(pm, /*useCurrentScope=*/true).failed())
378 return mlir::failure();
379 paramReplacer.addCopy(cast<DeclStmt>(pm));
380 }
381
382 if (emitStmt(s.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed())
383 return mlir::failure();
384 // returnValue should be valid as long as the coroutine's return type
385 // is not void. The assertion could help us to reduce the check later.
386 assert(returnValue.isValid() == (bool)s.getReturnStmt());
387 // Now we have the promise, initialize the GRO.
388 // We need to emit `get_return_object` first. According to:
389 // [dcl.fct.def.coroutine]p7
390 // The call to get_return_­object is sequenced before the call to
391 // initial_suspend and is invoked at most once.
392 //
393 // So we couldn't emit return value when we emit return statment,
394 // otherwise the call to get_return_object wouldn't be in front
395 // of initial_suspend.
396 if (returnValue.isValid())
397 emitAnyExprToMem(s.getReturnValue(), returnValue,
398 s.getReturnValue()->getType().getQualifiers(),
399 /*isInit*/ true);
400
402
403 curCoro.data->currentAwaitKind = cir::AwaitKind::Init;
404 if (emitStmt(s.getInitSuspendStmt(), /*useCurrentScope=*/true).failed())
405 return mlir::failure();
406
407 curCoro.data->currentAwaitKind = cir::AwaitKind::User;
408
409 mlir::OpBuilder::InsertPoint userBody;
410 auto coroBodyOp =
411 cir::CoroBodyOp::create(builder, openCurlyLoc, /*scopeBuilder=*/
412 [&](mlir::OpBuilder &b, mlir::Location loc) {
413 userBody = b.saveInsertionPoint();
414 });
415 {
416 mlir::OpBuilder::InsertionGuard guard(builder);
417 builder.restoreInsertionPoint(userBody);
418 // FIXME(cir): wrap emitBodyAndFallthrough with try/catch bits.
419 if (s.getExceptionHandler()) {
421 cgm.errorNYI("exceptions in coroutines are not yet supported in CIR");
422 }
423 if (emitBodyAndFallthrough(*this, s, s.getBody(), curLexScope).failed()) {
424 return mlir::failure();
425 }
426 }
427
428 mlir::Block &coroBodyBlock = coroBodyOp.getBody().back();
429 if (!coroBodyBlock.mightHaveTerminator()) {
430 mlir::OpBuilder::InsertionGuard guard(builder);
431 builder.setInsertionPointToEnd(&coroBodyBlock);
432 cir::YieldOp::create(builder, openCurlyLoc);
433 }
434
435 // Note that LLVM checks CanFallthrough by looking into the availability
436 // of the insert block which is kinda brittle and unintuitive, seems to be
437 // related with how landing pads are handled.
438 //
439 // CIRGen handles this by checking pre-existing co_returns in the current
440 // scope instead.
441 //
442 // From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock();
443 const bool canFallthrough = curLexScope->hasCoreturn();
444 const bool hasCoreturns = curCoro.data->coreturnCount > 0;
445 if (canFallthrough || hasCoreturns) {
446 curCoro.data->currentAwaitKind = cir::AwaitKind::Final;
447 {
448 mlir::OpBuilder::InsertionGuard guard(builder);
449 if (emitStmt(s.getFinalSuspendStmt(), /*useCurrentScope=*/true)
450 .failed())
451 return mlir::failure();
452 }
453 }
454 }
455
457 openCurlyLoc, builder.getNullPtr(builder.getVoidPtrTy(), openCurlyLoc));
458 if (auto *ret = cast_or_null<ReturnStmt>(s.getReturnStmt())) {
459 // Since we already emitted the return value above, so we shouldn't
460 // emit it again here.
461 Expr *previousRetValue = ret->getRetValue();
462 ret->setRetValue(nullptr);
463 if (emitStmt(ret, /*useCurrentScope=*/true).failed())
464 return mlir::failure();
465 // Set the return value back. The code generator, as the AST **Consumer**,
466 // shouldn't change the AST.
467 ret->setRetValue(previousRetValue);
468 }
469 return mlir::success();
470}
471
472static bool memberCallExpressionCanThrow(const Expr *e) {
473 if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
474 if (const auto *proto =
475 ce->getMethodDecl()->getType()->getAs<FunctionProtoType>())
476 if (isNoexceptExceptionSpec(proto->getExceptionSpecType()) &&
477 proto->canThrow() == CT_Cannot)
478 return false;
479 return true;
480}
481
482// Given a suspend expression which roughly looks like:
483//
484// auto && x = CommonExpr();
485// if (!x.await_ready()) {
486// x.await_suspend(...); (*)
487// }
488// x.await_resume();
489//
490// where the result of the entire expression is the result of x.await_resume()
491//
492// (*) If x.await_suspend return type is bool, it allows to veto a suspend:
493// if (x.await_suspend(...))
494// llvm_coro_suspend();
495//
496// This is more higher level than LLVM codegen, for that one see llvm's
497// docs/Coroutines.rst for more details.
namespace {
// Result slot for emitSuspendExpression: only one of the two members is
// populated, selected by the forLValue flag at the call site.
struct LValueOrRValue {
  LValue lv;
  RValue rv;
};
} // namespace
504
505static LValueOrRValue
507 CoroutineSuspendExpr const &s, cir::AwaitKind kind,
508 AggValueSlot aggSlot, bool ignoreResult,
509 mlir::Block *scopeParentBlock,
510 mlir::Value &tmpResumeRValAddr, bool forLValue) {
511 [[maybe_unused]] mlir::LogicalResult awaitBuild = mlir::success();
512 LValueOrRValue awaitRes;
513
515 CIRGenFunction::OpaqueValueMapping(cgf, s.getOpaqueValue());
516 CIRGenBuilderTy &builder = cgf.getBuilder();
517 [[maybe_unused]] cir::AwaitOp awaitOp = cir::AwaitOp::create(
518 builder, cgf.getLoc(s.getSourceRange()), kind,
519 /*readyBuilder=*/
520 [&](mlir::OpBuilder &b, mlir::Location loc) {
521 Expr *condExpr = s.getReadyExpr()->IgnoreParens();
522 builder.createCondition(cgf.evaluateExprAsBool(condExpr));
523 },
524 /*suspendBuilder=*/
525 [&](mlir::OpBuilder &b, mlir::Location loc) {
526 // Note that differently from LLVM codegen we do not emit coro.save
527 // and coro.suspend here, that should be done as part of lowering this
528 // to LLVM dialect (or some other MLIR dialect)
529
530 // A invalid suspendRet indicates "void returning await_suspend"
531 mlir::Value suspendRet = cgf.emitScalarExpr(s.getSuspendExpr());
532
533 // Veto suspension if requested by bool returning await_suspend.
534 if (suspendRet) {
535 cgf.cgm.errorNYI("Veto await_suspend");
536 }
537
538 // Signals the parent that execution flows to next region.
539 cir::YieldOp::create(builder, loc);
540 },
541 /*resumeBuilder=*/
542 [&](mlir::OpBuilder &b, mlir::Location loc) {
543 // Exception handling requires additional IR. If the 'await_resume'
544 // function is marked as 'noexcept', we avoid generating this additional
545 // IR.
546 CXXTryStmt *tryStmt = nullptr;
547 if (coro.exceptionHandler && kind == cir::AwaitKind::Init &&
548 memberCallExpressionCanThrow(s.getResumeExpr()))
549 cgf.cgm.errorNYI("Coro resume Exception");
550
551 // FIXME(cir): the alloca for the resume expr should be placed in the
552 // enclosing cir.scope instead.
553 if (forLValue) {
554 awaitRes.lv = cgf.emitLValue(s.getResumeExpr());
555 } else {
556 awaitRes.rv =
557 cgf.emitAnyExpr(s.getResumeExpr(), aggSlot, ignoreResult);
558 if (!awaitRes.rv.isIgnored()) {
559 // Create the alloca in the block before the scope wrapping
560 // cir.await.
561 mlir::Value value;
562 RValue rv = awaitRes.rv;
563 if (rv.isScalar()) {
564 value = rv.getValue();
565 } else if (rv.isComplex()) {
566 value = rv.getComplexValue();
567 } else {
568 cgf.cgm.errorNYI("emitSuspendExpression: Aggregate value");
569 return;
570 }
571
572 tmpResumeRValAddr = cgf.emitAlloca(
573 "__coawait_resume_rval", value.getType(), loc, CharUnits::One(),
574 builder.getBestAllocaInsertPoint(scopeParentBlock));
575 // Store the rvalue so we can reload it before the promise call.
576 builder.CIRBaseBuilderTy::createStore(loc, value,
577 tmpResumeRValAddr);
578 }
579 }
580
581 if (tryStmt)
582 cgf.cgm.errorNYI("Coro tryStmt");
583
584 // Returns control back to parent.
585 cir::YieldOp::create(builder, loc);
586 });
587
588 assert(awaitBuild.succeeded() && "Should know how to codegen");
589 return awaitRes;
590}
591
593 const CoroutineSuspendExpr &e,
594 cir::AwaitKind kind, AggValueSlot aggSlot,
595 bool ignoreResult) {
596 RValue rval;
597 mlir::Location scopeLoc = cgf.getLoc(e.getSourceRange());
598
599 // Since we model suspend / resume as an inner region, we must store
600 // resume scalar results in a tmp alloca, and load it after we build the
601 // suspend expression. An alternative way to do this would be to make
602 // every region return a value when promise.return_value() is used, but
603 // it's a bit awkward given that resume is the only region that actually
604 // returns a value.
605 mlir::Block *currEntryBlock = cgf.curLexScope->getEntryBlock();
606 [[maybe_unused]] mlir::Value tmpResumeRValAddr;
607
608 // No need to explicitly wrap this into a scope since the AST already uses a
609 // ExprWithCleanups, which will wrap this into a cir.scope anyways.
610 rval = emitSuspendExpression(cgf, *cgf.curCoro.data, e, kind, aggSlot,
611 ignoreResult, currEntryBlock, tmpResumeRValAddr,
612 /*forLValue*/ false)
613 .rv;
614
615 if (ignoreResult || rval.isIgnored())
616 return rval;
617
618 if (rval.isScalar()) {
619 rval = RValue::get(cir::LoadOp::create(cgf.getBuilder(), scopeLoc,
620 rval.getValue().getType(),
621 tmpResumeRValAddr));
622 } else if (rval.isAggregate()) {
623 // This is probably already handled via AggSlot, remove this assertion
624 // once we have a testcase and prove all pieces work.
625 cgf.cgm.errorNYI("emitSuspendExpr Aggregate");
626 } else { // complex
627 rval = RValue::getComplex(cir::LoadOp::create(
628 cgf.getBuilder(), scopeLoc, rval.getComplexValue().getType(),
629 tmpResumeRValAddr));
630 }
631 return rval;
632}
633
635 AggValueSlot aggSlot,
636 bool ignoreResult) {
637 return emitSuspendExpr(*this, e, curCoro.data->currentAwaitKind, aggSlot,
638 ignoreResult);
639}
640
642 AggValueSlot aggSlot,
643 bool ignoreResult) {
644 return emitSuspendExpr(*this, e, cir::AwaitKind::Yield, aggSlot,
645 ignoreResult);
646}
647
649 ++curCoro.data->coreturnCount;
650 curLexScope->setCoreturn();
651
652 const Expr *rv = s.getOperand();
653 if (rv && rv->getType()->isVoidType() && !isa<InitListExpr>(rv)) {
654 // Make sure to evaluate the non initlist expression of a co_return
655 // with a void expression for side effects.
656 RunCleanupsScope cleanupScope(*this);
657 emitIgnoredExpr(rv);
658 }
659
660 if (emitStmt(s.getPromiseCall(), /*useCurrentScope=*/true).failed())
661 return mlir::failure();
662 // Create a new return block (if not existent) and add a branch to
663 // it. The actual return instruction is only inserted during current
664 // scope cleanup handling.
665 mlir::Location loc = getLoc(s.getSourceRange());
666 cir::CoReturnOp::create(builder, loc);
667
668 return mlir::success();
669}
static void emit(Program &P, llvm::SmallVectorImpl< std::byte > &Code, const T &Val, bool &Success)
Helper to write bytecode and bail out if 32-bit offsets become invalid.
static LValueOrRValue emitSuspendExpression(CIRGenFunction &cgf, CGCoroData &coro, CoroutineSuspendExpr const &s, cir::AwaitKind kind, AggValueSlot aggSlot, bool ignoreResult, mlir::Block *scopeParentBlock, mlir::Value &tmpResumeRValAddr, bool forLValue)
static RValue emitSuspendExpr(CIRGenFunction &cgf, const CoroutineSuspendExpr &e, cir::AwaitKind kind, AggValueSlot aggSlot, bool ignoreResult)
static bool memberCallExpressionCanThrow(const Expr *e)
static mlir::LogicalResult emitBodyAndFallthrough(CIRGenFunction &cgf, const CoroutineBodyStmt &s, Stmt *body, const CIRGenFunction::LexicalScope *currLexScope)
static void createCoroData(CIRGenFunction &cgf, CIRGenFunction::CGCoroInfo &curCoro, cir::CallOp coroId)
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
mlir::Value createPtrIsNotNull(mlir::Value ptr)
mlir::Value getPointer() const
Definition Address.h:96
An aggregate value slot.
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
llvm::DenseMap< const clang::Decl *, Address > DeclMapTy
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
cir::CallOp emitCoroFreeBuiltin(const CallExpr *e)
RValue emitCoyieldExpr(const CoyieldExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
mlir::Operation * curFn
The current function or global initializer that is generated code for.
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
mlir::Type convertTypeForMem(QualType t)
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
Address returnValue
The temporary alloca to hold the return value.
CIRGenBuilderTy & getBuilder()
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
RValue emitCoawaitExpr(const CoawaitExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
clang::ASTContext & getContext() const
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr)
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
void error(SourceLocation loc, llvm::StringRef error)
Emit a general error that something can't be done.
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
bool isAggregate() const
Definition CIRGenValue.h:51
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
bool isComplex() const
Definition CIRGenValue.h:50
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
bool isIgnored() const
Definition CIRGenValue.h:52
mlir::Value getComplexValue() const
Return the value of this complex value.
Definition CIRGenValue.h:63
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
SourceLocation getBeginLoc() const
Definition Expr.h:3280
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
Represents a 'co_await' expression.
Definition ExprCXX.h:5365
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition StmtCXX.h:473
Represents the body of a coroutine.
Definition StmtCXX.h:320
Represents an expression that might suspend coroutine execution; either a co_await or co_yield expres...
Definition ExprCXX.h:5251
Represents a 'co_yield' expression.
Definition ExprCXX.h:5446
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
ValueDecl * getDecl()
Definition Expr.h:1341
bool isSingleDecl() const
isSingleDecl - This method returns true if this DeclStmt refers to a single Decl.
Definition Stmt.h:1650
const Decl * getSingleDecl() const
Definition Stmt.h:1652
This represents one expression.
Definition Expr.h:112
QualType getType() const
Definition Expr.h:144
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5362
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
Stmt - This represents one statement.
Definition Stmt.h:86
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Exposes information about the current target.
Definition TargetInfo.h:227
unsigned getNewAlign() const
Return the largest alignment for which a suitably-sized allocation with 'operator new(size_t)' is gua...
Definition TargetInfo.h:767
unsigned getCharWidth() const
Definition TargetInfo.h:521
bool isVoidType() const
Definition TypeBase.h:9039
const Expr * getInit() const
Definition Decl.h:1381
Defines the clang::TargetInfo interface.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
bool isNoexceptExceptionSpec(ExceptionSpecificationType ESpecType)
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool ehCleanupScope()
static bool coroutineExceptions()
static bool coroOutsideFrameMD()
static bool generateDebugInfo()
std::unique_ptr< CGCoroData > data
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
cir::PointerType voidPtrTy
void* in address space 0