clang 23.0.0git
FlattenCFG.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements pass that inlines CIR operations regions into the parent
10// function region.
11//
12//===----------------------------------------------------------------------===//
13
14#include "PassDetail.h"
15#include "mlir/Dialect/Func/IR/FuncOps.h"
16#include "mlir/IR/Block.h"
17#include "mlir/IR/Builders.h"
18#include "mlir/IR/PatternMatch.h"
19#include "mlir/Support/LogicalResult.h"
20#include "mlir/Transforms/DialectConversion.h"
21#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
26#include "llvm/ADT/TypeSwitch.h"
27
28using namespace mlir;
29using namespace cir;
30
31namespace mlir {
32#define GEN_PASS_DEF_CIRFLATTENCFG
33#include "clang/CIR/Dialect/Passes.h.inc"
34} // namespace mlir
35
36namespace {
37
38/// Lowers operations with the terminator trait that have a single successor.
39void lowerTerminator(mlir::Operation *op, mlir::Block *dest,
40 mlir::PatternRewriter &rewriter) {
41 assert(op->hasTrait<mlir::OpTrait::IsTerminator>() && "not a terminator");
42 mlir::OpBuilder::InsertionGuard guard(rewriter);
43 rewriter.setInsertionPoint(op);
44 rewriter.replaceOpWithNewOp<cir::BrOp>(op, dest);
45}
46
47/// Walks a region while skipping operations of type `Ops`. This ensures the
48/// callback is not applied to said operations and its children.
49template <typename... Ops>
50void walkRegionSkipping(
51 mlir::Region &region,
52 mlir::function_ref<mlir::WalkResult(mlir::Operation *)> callback) {
53 region.walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *op) {
54 if (isa<Ops...>(op))
55 return mlir::WalkResult::skip();
56 return callback(op);
57 });
58}
59
/// Pass that flattens CIR structured control-flow operations (if, scope,
/// switch, loop, ternary, cleanup scopes) into an explicit CFG of blocks and
/// branches within the parent function region, using the rewrite patterns
/// defined in this file. The pass body is defined elsewhere in this file.
struct CIRFlattenCFGPass : public impl::CIRFlattenCFGBase<CIRFlattenCFGPass> {

  CIRFlattenCFGPass() = default;
  void runOnOperation() override;
};
65
/// Flattens cir.if: the then/else regions are inlined into the parent region,
/// the op is replaced by a cir.brcond selecting between them, and control
/// rejoins at the block containing the operations that followed the cir.if.
struct CIRIfFlattening : public mlir::OpRewritePattern<cir::IfOp> {
  using OpRewritePattern<IfOp>::OpRewritePattern;

  mlir::LogicalResult
  matchAndRewrite(cir::IfOp ifOp,
                  mlir::PatternRewriter &rewriter) const override {
    mlir::OpBuilder::InsertionGuard guard(rewriter);
    mlir::Location loc = ifOp.getLoc();
    bool emptyElse = ifOp.getElseRegion().empty();
    mlir::Block *currentBlock = rewriter.getInsertionBlock();
    // Split off everything after the cir.if; that block becomes the join
    // point for both branches.
    mlir::Block *remainingOpsBlock =
        rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
    mlir::Block *continueBlock;
    if (ifOp->getResults().empty())
      continueBlock = remainingOpsBlock;
    else
      llvm_unreachable("NYI");

    // Inline the region
    mlir::Block *thenBeforeBody = &ifOp.getThenRegion().front();
    mlir::Block *thenAfterBody = &ifOp.getThenRegion().back();
    rewriter.inlineRegionBefore(ifOp.getThenRegion(), continueBlock);

    // A cir.yield terminating the then region becomes a branch to the join
    // block, forwarding any yielded values.
    rewriter.setInsertionPointToEnd(thenAfterBody);
    if (auto thenYieldOp =
            dyn_cast<cir::YieldOp>(thenAfterBody->getTerminator())) {
      rewriter.replaceOpWithNewOp<cir::BrOp>(thenYieldOp, thenYieldOp.getArgs(),
                                             continueBlock);
    }

    rewriter.setInsertionPointToEnd(continueBlock);

    // Has else region: inline it.
    mlir::Block *elseBeforeBody = nullptr;
    mlir::Block *elseAfterBody = nullptr;
    if (!emptyElse) {
      elseBeforeBody = &ifOp.getElseRegion().front();
      elseAfterBody = &ifOp.getElseRegion().back();
      rewriter.inlineRegionBefore(ifOp.getElseRegion(), continueBlock);
    } else {
      // No else region: the false edge of the branch goes straight to the
      // join block.
      elseBeforeBody = elseAfterBody = continueBlock;
    }

    // Terminate the block that originally held the cir.if with the
    // conditional branch.
    rewriter.setInsertionPointToEnd(currentBlock);
    cir::BrCondOp::create(rewriter, loc, ifOp.getCondition(), thenBeforeBody,
                          elseBeforeBody);

    if (!emptyElse) {
      // As with the then region, rewrite a trailing cir.yield into a branch
      // to the join block.
      rewriter.setInsertionPointToEnd(elseAfterBody);
      if (auto elseYieldOP =
              dyn_cast<cir::YieldOp>(elseAfterBody->getTerminator())) {
        rewriter.replaceOpWithNewOp<cir::BrOp>(
            elseYieldOP, elseYieldOP.getArgs(), continueBlock);
      }
    }

    rewriter.replaceOp(ifOp, continueBlock->getArguments());
    return mlir::success();
  }
};
126
/// Flattens cir.scope: inlines its body region into the parent region,
/// branching into it from the current block and rejoining afterwards. Empty
/// scopes are simply erased.
class CIRScopeOpFlattening : public mlir::OpRewritePattern<cir::ScopeOp> {
public:
  using OpRewritePattern<cir::ScopeOp>::OpRewritePattern;

  mlir::LogicalResult
  matchAndRewrite(cir::ScopeOp scopeOp,
                  mlir::PatternRewriter &rewriter) const override {
    mlir::OpBuilder::InsertionGuard guard(rewriter);
    mlir::Location loc = scopeOp.getLoc();

    // Empty scope: just remove it.
    // TODO: Remove this logic once CIR uses MLIR infrastructure to remove
    // trivially dead operations. MLIR canonicalizer is too aggressive and we
    // need to either (a) make sure all our ops model all side-effects and/or
    // (b) have more options in the canonicalizer in MLIR to temper
    // aggressiveness level.
    if (scopeOp.isEmpty()) {
      rewriter.eraseOp(scopeOp);
      return mlir::success();
    }

    // Split the current block before the ScopeOp to create the inlining
    // point.
    mlir::Block *currentBlock = rewriter.getInsertionBlock();
    mlir::Block *continueBlock =
        rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
    // Scope results flow out as block arguments of the continuation block.
    if (scopeOp.getNumResults() > 0)
      continueBlock->addArguments(scopeOp.getResultTypes(), loc);

    // Inline body region.
    mlir::Block *beforeBody = &scopeOp.getScopeRegion().front();
    mlir::Block *afterBody = &scopeOp.getScopeRegion().back();
    rewriter.inlineRegionBefore(scopeOp.getScopeRegion(), continueBlock);

    // Save stack and then branch into the body of the region.
    rewriter.setInsertionPointToEnd(currentBlock);
    cir::BrOp::create(rewriter, loc, mlir::ValueRange(), beforeBody);

    // Replace the scopeop return with a branch that jumps out of the body.
    // Stack restore before leaving the body region.
    rewriter.setInsertionPointToEnd(afterBody);
    if (auto yieldOp = dyn_cast<cir::YieldOp>(afterBody->getTerminator())) {
      rewriter.replaceOpWithNewOp<cir::BrOp>(yieldOp, yieldOp.getArgs(),
                                             continueBlock);
    }

    // Replace the op with values return from the body region.
    rewriter.replaceOp(scopeOp, continueBlock->getArguments());

    return mlir::success();
  }
};
180
/// Flattens cir.switch into cir.switch.flat: all case regions are inlined
/// into the parent region and the switch is replaced by a flat multi-way
/// branch. Case ranges are either expanded into individual case values (small
/// ranges) or lowered to an explicit range check that guards the default
/// destination.
class CIRSwitchOpFlattening : public mlir::OpRewritePattern<cir::SwitchOp> {
public:
  using OpRewritePattern<cir::SwitchOp>::OpRewritePattern;

  /// Rewrite a cir.yield into a branch to `destination`, forwarding the
  /// yield's operands as block arguments.
  inline void rewriteYieldOp(mlir::PatternRewriter &rewriter,
                             cir::YieldOp yieldOp,
                             mlir::Block *destination) const {
    rewriter.setInsertionPoint(yieldOp);
    rewriter.replaceOpWithNewOp<cir::BrOp>(yieldOp, yieldOp.getOperands(),
                                           destination);
  }

  // Return the new defaultDestination block.
  // Emits, in a fresh block placed before `defaultDestination`, an unsigned
  // "condition - lowerBound <= upperBound - lowerBound" check that branches
  // to `rangeDestination` when the condition falls in the range and to the
  // previous `defaultDestination` otherwise.
  Block *condBrToRangeDestination(cir::SwitchOp op,
                                  mlir::PatternRewriter &rewriter,
                                  mlir::Block *rangeDestination,
                                  mlir::Block *defaultDestination,
                                  const APInt &lowerBound,
                                  const APInt &upperBound) const {
    assert(lowerBound.sle(upperBound) && "Invalid range");
    mlir::Block *resBlock = rewriter.createBlock(defaultDestination);
    cir::IntType sIntType = cir::IntType::get(op.getContext(), 32, true);
    cir::IntType uIntType = cir::IntType::get(op.getContext(), 32, false);

    cir::ConstantOp rangeLength = cir::ConstantOp::create(
        rewriter, op.getLoc(),
        cir::IntAttr::get(sIntType, upperBound - lowerBound));

    cir::ConstantOp lowerBoundValue = cir::ConstantOp::create(
        rewriter, op.getLoc(), cir::IntAttr::get(sIntType, lowerBound));
    mlir::Value diffValue = cir::SubOp::create(
        rewriter, op.getLoc(), op.getCondition(), lowerBoundValue);

    // Use unsigned comparison to check if the condition is in the range.
    cir::CastOp uDiffValue = cir::CastOp::create(
        rewriter, op.getLoc(), uIntType, CastKind::integral, diffValue);
    cir::CastOp uRangeLength = cir::CastOp::create(
        rewriter, op.getLoc(), uIntType, CastKind::integral, rangeLength);

    cir::CmpOp cmpResult = cir::CmpOp::create(
        rewriter, op.getLoc(), cir::CmpOpKind::le, uDiffValue, uRangeLength);
    cir::BrCondOp::create(rewriter, op.getLoc(), cmpResult, rangeDestination,
                          defaultDestination);
    return resBlock;
  }

  mlir::LogicalResult
  matchAndRewrite(cir::SwitchOp op,
                  mlir::PatternRewriter &rewriter) const override {
    // Cleanup scopes must be lowered before the enclosing switch so that
    // break inside them is properly routed through cleanup.
    // Fail the match so the pattern rewriter will process cleanup scopes first.
    bool hasNestedCleanup = op->walk([&](cir::CleanupScopeOp) {
      return mlir::WalkResult::interrupt();
    }).wasInterrupted();
    if (hasNestedCleanup)
      return mlir::failure();

    llvm::SmallVector<CaseOp> cases;
    op.collectCases(cases);

    // Empty switch statement: just erase it.
    if (cases.empty()) {
      rewriter.eraseOp(op);
      return mlir::success();
    }

    // Create exit block from the next node of cir.switch op.
    mlir::Block *exitBlock = rewriter.splitBlock(
        rewriter.getBlock(), op->getNextNode()->getIterator());

    // We lower cir.switch op in the following process:
    // 1. Inline the region from the switch op after switch op.
    // 2. Traverse each cir.case op:
    //    a. Record the entry block, block arguments and condition for every
    //    case. b. Inline the case region after the case op.
    // 3. Replace the empty cir.switch.op with the new cir.switchflat op by the
    //    recorded block and conditions.

    // inline everything from switch body between the switch op and the exit
    // block.
    {
      cir::YieldOp switchYield = nullptr;
      // Clear switch operation.
      for (mlir::Block &block :
           llvm::make_early_inc_range(op.getBody().getBlocks()))
        if (auto yieldOp = dyn_cast<cir::YieldOp>(block.getTerminator()))
          switchYield = yieldOp;

      assert(!op.getBody().empty());
      mlir::Block *originalBlock = op->getBlock();
      mlir::Block *swopBlock =
          rewriter.splitBlock(originalBlock, op->getIterator());
      rewriter.inlineRegionBefore(op.getBody(), exitBlock);

      if (switchYield)
        rewriteYieldOp(rewriter, switchYield, exitBlock);

      rewriter.setInsertionPointToEnd(originalBlock);
      cir::BrOp::create(rewriter, op.getLoc(), swopBlock);
    }

    // Allocate required data structures (disconsider default case in
    // vectors).
    llvm::SmallVector<mlir::APInt, 8> caseValues;
    llvm::SmallVector<mlir::Block *, 8> caseDestinations;
    llvm::SmallVector<mlir::ValueRange, 8> caseOperands;

    llvm::SmallVector<std::pair<APInt, APInt>> rangeValues;
    llvm::SmallVector<mlir::Block *> rangeDestinations;
    llvm::SmallVector<mlir::ValueRange> rangeOperands;

    // Initialize default case as optional.
    mlir::Block *defaultDestination = exitBlock;
    mlir::ValueRange defaultOperands = exitBlock->getArguments();

    // Digest the case statements values and bodies.
    for (cir::CaseOp caseOp : cases) {
      mlir::Region &region = caseOp.getCaseRegion();

      // Found default case: save destination and operands.
      switch (caseOp.getKind()) {
      case cir::CaseOpKind::Default:
        defaultDestination = &region.front();
        defaultOperands = defaultDestination->getArguments();
        break;
      case cir::CaseOpKind::Range:
        assert(caseOp.getValue().size() == 2 &&
               "Case range should have 2 case value");
        rangeValues.push_back(
            {cast<cir::IntAttr>(caseOp.getValue()[0]).getValue(),
             cast<cir::IntAttr>(caseOp.getValue()[1]).getValue()});
        rangeDestinations.push_back(&region.front());
        rangeOperands.push_back(rangeDestinations.back()->getArguments());
        break;
      case cir::CaseOpKind::Anyof:
      case cir::CaseOpKind::Equal:
        // AnyOf cases kind can have multiple values, hence the loop below.
        for (const mlir::Attribute &value : caseOp.getValue()) {
          caseValues.push_back(cast<cir::IntAttr>(value).getValue());
          caseDestinations.push_back(&region.front());
          caseOperands.push_back(caseDestinations.back()->getArguments());
        }
        break;
      }

      // Handle break statements. Breaks nested inside loops or inner
      // switches belong to those constructs, so skip over them.
      walkRegionSkipping<cir::LoopOpInterface, cir::SwitchOp>(
          region, [&](mlir::Operation *op) {
            if (!isa<cir::BreakOp>(op))
              return mlir::WalkResult::advance();

            lowerTerminator(op, exitBlock, rewriter);
            return mlir::WalkResult::skip();
          });

      // Track fallthrough in cases: a yield at the end of a case region
      // becomes a branch into the start of the next case's code.
      for (mlir::Block &blk : region.getBlocks()) {
        if (blk.getNumSuccessors())
          continue;

        if (auto yieldOp = dyn_cast<cir::YieldOp>(blk.getTerminator())) {
          mlir::Operation *nextOp = caseOp->getNextNode();
          assert(nextOp && "caseOp is not expected to be the last op");
          mlir::Block *oldBlock = nextOp->getBlock();
          mlir::Block *newBlock =
              rewriter.splitBlock(oldBlock, nextOp->getIterator());
          rewriter.setInsertionPointToEnd(oldBlock);
          cir::BrOp::create(rewriter, nextOp->getLoc(), mlir::ValueRange(),
                            newBlock);
          rewriteYieldOp(rewriter, yieldOp, newBlock);
        }
      }

      mlir::Block *oldBlock = caseOp->getBlock();
      mlir::Block *newBlock =
          rewriter.splitBlock(oldBlock, caseOp->getIterator());

      mlir::Block &entryBlock = caseOp.getCaseRegion().front();
      rewriter.inlineRegionBefore(caseOp.getCaseRegion(), newBlock);

      // Create a branch to the entry of the inlined region.
      rewriter.setInsertionPointToEnd(oldBlock);
      cir::BrOp::create(rewriter, caseOp.getLoc(), &entryBlock);
    }

    // Remove all cases since we've inlined the regions.
    for (cir::CaseOp caseOp : cases) {
      mlir::Block *caseBlock = caseOp->getBlock();
      // Erase the block with no predecessors here to make the generated code
      // simpler a little bit.
      if (caseBlock->hasNoPredecessors())
        rewriter.eraseBlock(caseBlock);
      else
        rewriter.eraseOp(caseOp);
    }

    for (auto [rangeVal, operand, destination] :
         llvm::zip(rangeValues, rangeOperands, rangeDestinations)) {
      APInt lowerBound = rangeVal.first;
      APInt upperBound = rangeVal.second;

      // The case range is unreachable, skip it.
      if (lowerBound.sgt(upperBound))
        continue;

      // If range is small, add multiple switch instruction cases.
      // This magical number is from the original CGStmt code.
      constexpr int kSmallRangeThreshold = 64;
      // NOTE(review): APInt::ult requires matching bit widths; this assumes
      // case values are 32-bit here — confirm against SwitchOp verifier.
      if ((upperBound - lowerBound)
              .ult(llvm::APInt(32, kSmallRangeThreshold))) {
        for (APInt iValue = lowerBound; iValue.sle(upperBound); ++iValue) {
          caseValues.push_back(iValue);
          caseOperands.push_back(operand);
          caseDestinations.push_back(destination);
        }
        continue;
      }

      // Large range: chain a range check in front of the current default
      // destination instead of enumerating every value.
      defaultDestination =
          condBrToRangeDestination(op, rewriter, destination,
                                   defaultDestination, lowerBound, upperBound);
      defaultOperands = operand;
    }

    // Set switch op to branch to the newly created blocks.
    rewriter.setInsertionPoint(op);
    rewriter.replaceOpWithNewOp<cir::SwitchFlatOp>(
        op, op.getCondition(), defaultDestination, defaultOperands, caseValues,
        caseDestinations, caseOperands);

    return mlir::success();
  }
};
415
/// Flattens any op implementing cir.loop (for/while/do-while) into an
/// explicit CFG: the cond/body/step regions are inlined into the parent
/// region and wired together with branches; break/continue/yield terminators
/// are lowered to branches to the appropriate blocks.
class CIRLoopOpInterfaceFlattening
    : public mlir::OpInterfaceRewritePattern<cir::LoopOpInterface> {
public:
  using mlir::OpInterfaceRewritePattern<
      cir::LoopOpInterface>::OpInterfaceRewritePattern;

  /// Replace a cir.condition terminator with a conditional branch to either
  /// the loop body or the exit block.
  inline void lowerConditionOp(cir::ConditionOp op, mlir::Block *body,
                               mlir::Block *exit,
                               mlir::PatternRewriter &rewriter) const {
    mlir::OpBuilder::InsertionGuard guard(rewriter);
    rewriter.setInsertionPoint(op);
    rewriter.replaceOpWithNewOp<cir::BrCondOp>(op, op.getCondition(), body,
                                               exit);
  }

  mlir::LogicalResult
  matchAndRewrite(cir::LoopOpInterface op,
                  mlir::PatternRewriter &rewriter) const final {
    // Cleanup scopes must be lowered before the enclosing loop so that
    // break/continue inside them are properly routed through cleanup.
    // Fail the match so the pattern rewriter will process cleanup scopes first.
    bool hasNestedCleanup = op->walk([&](cir::CleanupScopeOp) {
      return mlir::WalkResult::interrupt();
    }).wasInterrupted();
    if (hasNestedCleanup)
      return mlir::failure();

    // Setup CFG blocks.
    mlir::Block *entry = rewriter.getInsertionBlock();
    mlir::Block *exit =
        rewriter.splitBlock(entry, rewriter.getInsertionPoint());
    mlir::Block *cond = &op.getCond().front();
    mlir::Block *body = &op.getBody().front();
    // The step region is only present for `for` loops.
    mlir::Block *step =
        (op.maybeGetStep() ? &op.maybeGetStep()->front() : nullptr);

    // Setup loop entry branch.
    rewriter.setInsertionPointToEnd(entry);
    cir::BrOp::create(rewriter, op.getLoc(), &op.getEntry().front());

    // Branch from condition region to body or exit.
    auto conditionOp = cast<cir::ConditionOp>(cond->getTerminator());
    lowerConditionOp(conditionOp, body, exit, rewriter);

    // TODO(cir): Remove the walks below. It visits operations unnecessarily.
    // However, to solve this we would likely need a custom DialectConversion
    // driver to customize the order that operations are visited.

    // Lower continue statements: continue jumps to the step block if there is
    // one, otherwise back to the condition check.
    mlir::Block *dest = (step ? step : cond);
    op.walkBodySkippingNestedLoops([&](mlir::Operation *op) {
      if (!isa<cir::ContinueOp>(op))
        return mlir::WalkResult::advance();

      lowerTerminator(op, dest, rewriter);
      return mlir::WalkResult::skip();
    });

    // Lower break statements. Breaks belonging to nested loops or switches
    // are skipped; they are handled when those ops are flattened.
    walkRegionSkipping<cir::LoopOpInterface, cir::SwitchOp>(
        op.getBody(), [&](mlir::Operation *op) {
          if (!isa<cir::BreakOp>(op))
            return mlir::WalkResult::advance();

          lowerTerminator(op, exit, rewriter);
          return mlir::WalkResult::skip();
        });

    // Lower optional body region yield.
    for (mlir::Block &blk : op.getBody().getBlocks()) {
      auto bodyYield = dyn_cast<cir::YieldOp>(blk.getTerminator());
      if (bodyYield)
        lowerTerminator(bodyYield, (step ? step : cond), rewriter);
    }

    // Lower mandatory step region yield.
    if (step)
      lowerTerminator(cast<cir::YieldOp>(step->getTerminator()), cond,
                      rewriter);

    // Move region contents out of the loop op.
    rewriter.inlineRegionBefore(op.getCond(), exit);
    rewriter.inlineRegionBefore(op.getBody(), exit);
    if (step)
      rewriter.inlineRegionBefore(*op.maybeGetStep(), exit);

    rewriter.eraseOp(op);
    return mlir::success();
  }
};
506
/// Flattens cir.ternary: inlines the true/false regions into the parent
/// region and replaces the op with a conditional branch; results flow out as
/// block arguments of the continuation block. Regions terminated by
/// cir.unreachable (e.g. throw expressions) are left unchanged.
class CIRTernaryOpFlattening : public mlir::OpRewritePattern<cir::TernaryOp> {
public:
  using OpRewritePattern<cir::TernaryOp>::OpRewritePattern;

  mlir::LogicalResult
  matchAndRewrite(cir::TernaryOp op,
                  mlir::PatternRewriter &rewriter) const override {
    Location loc = op->getLoc();
    Block *condBlock = rewriter.getInsertionBlock();
    Block::iterator opPosition = rewriter.getInsertionPoint();
    // Everything after the ternary moves to its own block.
    Block *remainingOpsBlock = rewriter.splitBlock(condBlock, opPosition);
    llvm::SmallVector<mlir::Location, 2> locs;
    // Ternary result is optional, make sure to populate the location only
    // when relevant.
    if (op->getResultTypes().size())
      locs.push_back(loc);
    // The continuation block carries the ternary's result (if any) as a
    // block argument and falls through to the remaining ops.
    Block *continueBlock =
        rewriter.createBlock(remainingOpsBlock, op->getResultTypes(), locs);
    cir::BrOp::create(rewriter, loc, remainingOpsBlock);

    Region &trueRegion = op.getTrueRegion();
    Block *trueBlock = &trueRegion.front();
    mlir::Operation *trueTerminator = trueRegion.back().getTerminator();
    rewriter.setInsertionPointToEnd(&trueRegion.back());

    // Handle both yield and unreachable terminators (throw expressions)
    if (auto trueYieldOp = dyn_cast<cir::YieldOp>(trueTerminator)) {
      rewriter.replaceOpWithNewOp<cir::BrOp>(trueYieldOp, trueYieldOp.getArgs(),
                                             continueBlock);
    } else if (isa<cir::UnreachableOp>(trueTerminator)) {
      // Terminator is unreachable (e.g., from throw), just keep it
    } else {
      trueTerminator->emitError("unexpected terminator in ternary true region, "
                                "expected yield or unreachable, got: ")
          << trueTerminator->getName();
      return mlir::failure();
    }
    rewriter.inlineRegionBefore(trueRegion, continueBlock);

    Block *falseBlock = continueBlock;
    Region &falseRegion = op.getFalseRegion();

    falseBlock = &falseRegion.front();
    mlir::Operation *falseTerminator = falseRegion.back().getTerminator();
    rewriter.setInsertionPointToEnd(&falseRegion.back());

    // Handle both yield and unreachable terminators (throw expressions)
    if (auto falseYieldOp = dyn_cast<cir::YieldOp>(falseTerminator)) {
      rewriter.replaceOpWithNewOp<cir::BrOp>(
          falseYieldOp, falseYieldOp.getArgs(), continueBlock);
    } else if (isa<cir::UnreachableOp>(falseTerminator)) {
      // Terminator is unreachable (e.g., from throw), just keep it
    } else {
      falseTerminator->emitError("unexpected terminator in ternary false "
                                 "region, expected yield or unreachable, got: ")
          << falseTerminator->getName();
      return mlir::failure();
    }
    rewriter.inlineRegionBefore(falseRegion, continueBlock);

    // Branch from the original block into the inlined regions.
    rewriter.setInsertionPointToEnd(condBlock);
    cir::BrCondOp::create(rewriter, loc, op.getCond(), trueBlock, falseBlock);

    rewriter.replaceOp(op, continueBlock->getArguments());

    // Ok, we're done!
    return mlir::success();
  }
};
576
577// Get or create the cleanup destination slot for a function. This slot is
578// shared across all cleanup scopes in the function to track which exit path
579// to take after running cleanup code when there are multiple exits.
580static cir::AllocaOp getOrCreateCleanupDestSlot(cir::FuncOp funcOp,
581 mlir::PatternRewriter &rewriter,
582 mlir::Location loc) {
583 mlir::Block &entryBlock = funcOp.getBody().front();
584
585 // Look for an existing cleanup dest slot in the entry block.
586 auto it = llvm::find_if(entryBlock, [](auto &op) {
587 return mlir::isa<AllocaOp>(&op) &&
588 mlir::cast<AllocaOp>(&op).getCleanupDestSlot();
589 });
590 if (it != entryBlock.end())
591 return mlir::cast<cir::AllocaOp>(*it);
592
593 // Create a new cleanup dest slot at the start of the entry block.
594 mlir::OpBuilder::InsertionGuard guard(rewriter);
595 rewriter.setInsertionPointToStart(&entryBlock);
596 cir::IntType s32Type =
597 cir::IntType::get(rewriter.getContext(), 32, /*isSigned=*/true);
598 cir::PointerType ptrToS32Type = cir::PointerType::get(s32Type);
599 cir::CIRDataLayout dataLayout(funcOp->getParentOfType<mlir::ModuleOp>());
600 uint64_t alignment = dataLayout.getAlignment(s32Type, true).value();
601 auto allocaOp = cir::AllocaOp::create(
602 rewriter, loc, ptrToS32Type, s32Type, "__cleanup_dest_slot",
603 /*alignment=*/rewriter.getI64IntegerAttr(alignment));
604 allocaOp.setCleanupDestSlot(true);
605 return allocaOp;
606}
607
608/// Shared EH flattening utilities used by both CIRCleanupScopeOpFlattening
609/// and CIRTryOpFlattening.
610
611// Collect all function calls in a region that may throw exceptions and need
612// to be replaced with try_call operations. Skips calls marked nothrow.
613// Nested cleanup scopes and try ops are always flattened before their
614// enclosing parents, so there are no nested regions to skip here.
615static void
616collectThrowingCalls(mlir::Region &region,
617 llvm::SmallVectorImpl<cir::CallOp> &callsToRewrite) {
618 region.walk([&](cir::CallOp callOp) {
619 if (!callOp.getNothrow())
620 callsToRewrite.push_back(callOp);
621 });
622}
623
624// Collect all cir.resume operations in a region that come from
625// already-flattened try or cleanup scope operations. These resume ops need
626// to be chained through this scope's EH handler instead of unwinding
627// directly to the caller. Nested cleanup scopes and try ops are always
628// flattened before their enclosing parents, so there are no nested regions
629// to skip here.
630static void collectResumeOps(mlir::Region &region,
632 region.walk([&](cir::ResumeOp resumeOp) { resumeOps.push_back(resumeOp); });
633}
634
// Replace a cir.call with a cir.try_call that unwinds to the `unwindDest`
// block if an exception is thrown.
static void replaceCallWithTryCall(cir::CallOp callOp, mlir::Block *unwindDest,
                                   mlir::Location loc,
                                   mlir::PatternRewriter &rewriter) {
  mlir::Block *callBlock = callOp->getBlock();

  assert(!callOp.getNothrow() && "call is not expected to throw");

  // Split the block after the call - remaining ops become the normal
  // destination.
  mlir::Block *normalDest =
      rewriter.splitBlock(callBlock, std::next(callOp->getIterator()));

  // Build the try_call to replace the original call.
  // TODO(cir): Preserve function and argument attributes.
  rewriter.setInsertionPoint(callOp);
  cir::TryCallOp tryCallOp;
  if (callOp.isIndirect()) {
    // Indirect call: the callee is a function-pointer value; recover the
    // result type from the pointee function type.
    mlir::Value indTarget = callOp.getIndirectCall();
    auto ptrTy = mlir::cast<cir::PointerType>(indTarget.getType());
    auto resTy = mlir::cast<cir::FuncType>(ptrTy.getPointee());
    tryCallOp =
        cir::TryCallOp::create(rewriter, loc, indTarget, resTy, normalDest,
                               unwindDest, callOp.getArgOperands());
  } else {
    // Direct call: forward the callee symbol and the (possibly void) result
    // type.
    mlir::Type resType = callOp->getNumResults() > 0
                             ? callOp->getResult(0).getType()
                             : mlir::Type();
    tryCallOp =
        cir::TryCallOp::create(rewriter, loc, callOp.getCalleeAttr(), resType,
                               normalDest, unwindDest, callOp.getArgOperands());
  }

  // Replace uses of the call result with the try_call result.
  if (callOp->getNumResults() > 0)
    callOp->getResult(0).replaceAllUsesWith(tryCallOp.getResult());

  rewriter.eraseOp(callOp);
}
675
676// Create a shared unwind destination block. The block contains a
677// cir.eh.initiate operation (optionally with the cleanup attribute) and a
678// branch to the given destination block, passing the eh_token.
679static mlir::Block *buildUnwindBlock(mlir::Block *dest, bool hasCleanup,
680 mlir::Location loc,
681 mlir::Block *insertBefore,
682 mlir::PatternRewriter &rewriter) {
683 mlir::Block *unwindBlock = rewriter.createBlock(insertBefore);
684 rewriter.setInsertionPointToEnd(unwindBlock);
685 auto ehInitiate =
686 cir::EhInitiateOp::create(rewriter, loc, /*cleanup=*/hasCleanup);
687 cir::BrOp::create(rewriter, loc, mlir::ValueRange{ehInitiate.getEhToken()},
688 dest);
689 return unwindBlock;
690}
691
692class CIRCleanupScopeOpFlattening
693 : public mlir::OpRewritePattern<cir::CleanupScopeOp> {
694public:
695 using OpRewritePattern<cir::CleanupScopeOp>::OpRewritePattern;
696
  // Describes one control-flow path that leaves a cleanup scope's body.
  struct CleanupExit {
    // An operation that exits the cleanup scope (yield, break, continue,
    // return, etc.)
    mlir::Operation *exitOp;

    // A unique identifier for this exit's destination (used for switch dispatch
    // when there are multiple exits).
    int destinationId;

    CleanupExit(mlir::Operation *op, int id) : exitOp(op), destinationId(id) {}
  };
708
  // Collect all operations that exit a cleanup scope body. Return, goto, break,
  // and continue can all require branches through the cleanup region. When a
  // loop is encountered, only return and goto are collected because break and
  // continue are handled by the loop and stay within the cleanup scope. When a
  // switch is encountered, return, goto and continue are collected because they
  // may all branch through the cleanup, but break is local to the switch. When
  // a nested cleanup scope is encountered, we recursively collect exits since
  // any return, goto, break, or continue from the nested cleanup will also
  // branch through the outer cleanup.
  //
  // Note that goto statements may not necessarily exit the cleanup scope, but
  // for now we conservatively assume that they do. We'll need more nuanced
  // handling of that when multi-exit flattening is implemented.
  //
  // This function assigns unique destination IDs to each exit, which are
  // used when multi-exit cleanup scopes are flattened.
  void collectExits(mlir::Region &cleanupBodyRegion,
                    llvm::SmallVectorImpl<CleanupExit> &exits,
                    int &nextId) const {
    // Collect yield terminators from the body region. We do this separately
    // because yields in nested operations, including those in nested cleanup
    // scopes, won't branch through the outer cleanup region.
    for (mlir::Block &block : cleanupBodyRegion) {
      auto *terminator = block.getTerminator();
      if (isa<cir::YieldOp>(terminator))
        exits.emplace_back(terminator, nextId++);
    }

    // Lambda to walk a loop and collect only returns and gotos.
    // Break and continue inside loops are handled by the loop itself.
    // Loops don't require special handling for nested switch or cleanup scopes
    // because break and continue never branch out of the loop.
    auto collectExitsInLoop = [&](mlir::Operation *loopOp) {
      loopOp->walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *nestedOp) {
        if (isa<cir::ReturnOp, cir::GotoOp>(nestedOp))
          exits.emplace_back(nestedOp, nextId++);
        return mlir::WalkResult::advance();
      });
    };

    // Forward declaration for mutual recursion.
    std::function<void(mlir::Region &, bool)> collectExitsInCleanup;
    std::function<void(mlir::Operation *)> collectExitsInSwitch;

    // Lambda to collect exits from a switch. Collects return/goto/continue but
    // not break (handled by switch). For nested loops/cleanups, recurses.
    collectExitsInSwitch = [&](mlir::Operation *switchOp) {
      switchOp->walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *nestedOp) {
        if (isa<cir::CleanupScopeOp>(nestedOp)) {
          // Walk the nested cleanup, but ignore break statements because they
          // will be handled by the switch we are currently walking.
          collectExitsInCleanup(
              cast<cir::CleanupScopeOp>(nestedOp).getBodyRegion(),
              /*ignoreBreak=*/true);
          return mlir::WalkResult::skip();
        } else if (isa<cir::LoopOpInterface>(nestedOp)) {
          collectExitsInLoop(nestedOp);
          return mlir::WalkResult::skip();
        } else if (isa<cir::ReturnOp, cir::GotoOp, cir::ContinueOp>(nestedOp)) {
          exits.emplace_back(nestedOp, nextId++);
        }
        return mlir::WalkResult::advance();
      });
    };

    // Lambda to collect exits from a cleanup scope body region. This collects
    // break (optionally), continue, return, and goto, handling nested loops,
    // switches, and cleanups appropriately.
    collectExitsInCleanup = [&](mlir::Region &region, bool ignoreBreak) {
      region.walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *op) {
        // We need special handling for break statements because if this cleanup
        // scope was nested within a switch op, break will be handled by the
        // switch operation and therefore won't exit the cleanup scope enclosing
        // the switch. We're only collecting exits from the cleanup that started
        // this walk. Exits from nested cleanups will be handled when we flatten
        // the nested cleanup.
        if (!ignoreBreak && isa<cir::BreakOp>(op)) {
          exits.emplace_back(op, nextId++);
        } else if (isa<cir::ContinueOp, cir::ReturnOp, cir::GotoOp>(op)) {
          exits.emplace_back(op, nextId++);
        } else if (isa<cir::CleanupScopeOp>(op)) {
          // Recurse into nested cleanup's body region.
          collectExitsInCleanup(cast<cir::CleanupScopeOp>(op).getBodyRegion(),
                                /*ignoreBreak=*/ignoreBreak);
          return mlir::WalkResult::skip();
        } else if (isa<cir::LoopOpInterface>(op)) {
          // This kicks off a separate walk rather than continuing to dig deeper
          // in the current walk because we need to handle break and continue
          // differently inside loops.
          collectExitsInLoop(op);
          return mlir::WalkResult::skip();
        } else if (isa<cir::SwitchOp>(op)) {
          // This kicks off a separate walk rather than continuing to dig deeper
          // in the current walk because we need to handle break differently
          // inside switches.
          collectExitsInSwitch(op);
          return mlir::WalkResult::skip();
        }
        return mlir::WalkResult::advance();
      });
    };

    // Collect exits from the body region.
    collectExitsInCleanup(cleanupBodyRegion, /*ignoreBreak=*/false);
  }
814
815 // Check if an operand's defining op should be moved to the destination block.
816 // We only sink constants and simple loads. Anything else should be saved
817 // to a temporary alloca and reloaded at the destination block.
818 static bool shouldSinkReturnOperand(mlir::Value operand,
819 cir::ReturnOp returnOp) {
820 // Block arguments can't be moved
821 mlir::Operation *defOp = operand.getDefiningOp();
822 if (!defOp)
823 return false;
824
825 // Only move constants and loads to the dispatch block. For anything else,
826 // we'll store to a temporary and reload in the dispatch block.
827 if (!mlir::isa<cir::ConstantOp, cir::LoadOp>(defOp))
828 return false;
829
830 // Check if the return is the only user
831 if (!operand.hasOneUse())
832 return false;
833
834 // Only move ops that are in the same block as the return.
835 if (defOp->getBlock() != returnOp->getBlock())
836 return false;
837
838 if (auto loadOp = mlir::dyn_cast<cir::LoadOp>(defOp)) {
839 // Only attempt to move loads of allocas in the entry block.
840 mlir::Value ptr = loadOp.getAddr();
841 auto funcOp = returnOp->getParentOfType<cir::FuncOp>();
842 assert(funcOp && "Return op has no function parent?");
843 mlir::Block &funcEntryBlock = funcOp.getBody().front();
844
845 // Check if it's an alloca in the function entry block
846 if (auto allocaOp =
847 mlir::dyn_cast_if_present<cir::AllocaOp>(ptr.getDefiningOp()))
848 return allocaOp->getBlock() == &funcEntryBlock;
849
850 return false;
851 }
852
853 // Make sure we only fall through to here with constants.
854 assert(mlir::isa<cir::ConstantOp>(defOp) && "Expected constant op");
855 return true;
856 }
857
858 // For returns with operands in cleanup dispatch blocks, the operands may not
859 // dominate the dispatch block. This function handles that by either sinking
860 // the operand's defining op to the dispatch block (for constants and simple
861 // loads) or by storing to a temporary alloca and reloading it.
  void
  getReturnOpOperands(cir::ReturnOp returnOp, mlir::Operation *exitOp,
                      mlir::Location loc, mlir::PatternRewriter &rewriter,
                      llvm::SmallVectorImpl<mlir::Value> &returnValues) const {
    // The caller has positioned the rewriter in the dispatch block being
    // built; that block is where operands must be made available.
    mlir::Block *destBlock = rewriter.getInsertionBlock();
    auto funcOp = exitOp->getParentOfType<cir::FuncOp>();
    assert(funcOp && "Return op has no function parent?");
    mlir::Block &funcEntryBlock = funcOp.getBody().front();

    for (mlir::Value operand : returnOp.getOperands()) {
      if (shouldSinkReturnOperand(operand, returnOp)) {
        // Sink the defining op to the dispatch block. This is safe because
        // shouldSinkReturnOperand verified the return is the only user.
        mlir::Operation *defOp = operand.getDefiningOp();
        defOp->moveBefore(destBlock, destBlock->end());
        returnValues.push_back(operand);
      } else {
        // Create an alloca in the function entry block. The insertion guard
        // restores the dispatch-block insertion point afterwards.
        cir::AllocaOp alloca;
        {
          mlir::OpBuilder::InsertionGuard guard(rewriter);
          rewriter.setInsertionPointToStart(&funcEntryBlock);
          cir::CIRDataLayout dataLayout(
              funcOp->getParentOfType<mlir::ModuleOp>());
          uint64_t alignment =
              dataLayout.getAlignment(operand.getType(), true).value();
          cir::PointerType ptrType = cir::PointerType::get(operand.getType());
          alloca = cir::AllocaOp::create(rewriter, loc, ptrType,
                                         operand.getType(), "__ret_operand_tmp",
                                         rewriter.getI64IntegerAttr(alignment));
        }

        // Store the operand value at the original return location, where the
        // operand is known to dominate.
        {
          mlir::OpBuilder::InsertionGuard guard(rewriter);
          rewriter.setInsertionPoint(exitOp);
          cir::StoreOp::create(rewriter, loc, operand, alloca,
                               /*isVolatile=*/false,
                               /*alignment=*/mlir::IntegerAttr(),
                               cir::SyncScopeKindAttr(), cir::MemOrderAttr());
        }

        // Reload the value from the temporary alloca in the destination block.
        rewriter.setInsertionPointToEnd(destBlock);
        auto loaded = cir::LoadOp::create(
            rewriter, loc, alloca, /*isDeref=*/false,
            /*isVolatile=*/false, /*alignment=*/mlir::IntegerAttr(),
            cir::SyncScopeKindAttr(), cir::MemOrderAttr());
        returnValues.push_back(loaded);
      }
    }
  }
913
914 // Create the appropriate terminator for an exit operation in the dispatch
915 // block. For return ops with operands, this handles the dominance issue by
916 // either moving the operand's defining op to the dispatch block (if it's a
917 // trivial use) or by storing to a temporary alloca and loading it.
918 mlir::LogicalResult
919 createExitTerminator(mlir::Operation *exitOp, mlir::Location loc,
920 mlir::Block *continueBlock,
921 mlir::PatternRewriter &rewriter) const {
922 return llvm::TypeSwitch<mlir::Operation *, mlir::LogicalResult>(exitOp)
923 .Case<cir::YieldOp>([&](auto) {
924 // Yield becomes a branch to continue block.
925 cir::BrOp::create(rewriter, loc, continueBlock);
926 return mlir::success();
927 })
928 .Case<cir::BreakOp>([&](auto) {
929 // Break is preserved for later lowering by enclosing switch/loop.
930 cir::BreakOp::create(rewriter, loc);
931 return mlir::success();
932 })
933 .Case<cir::ContinueOp>([&](auto) {
934 // Continue is preserved for later lowering by enclosing loop.
935 cir::ContinueOp::create(rewriter, loc);
936 return mlir::success();
937 })
938 .Case<cir::ReturnOp>([&](auto returnOp) {
939 // Return from the cleanup exit. Note, if this is a return inside a
940 // nested cleanup scope, the flattening of the outer scope will handle
941 // branching through the outer cleanup.
942 if (returnOp.hasOperand()) {
943 llvm::SmallVector<mlir::Value, 2> returnValues;
944 getReturnOpOperands(returnOp, exitOp, loc, rewriter, returnValues);
945 cir::ReturnOp::create(rewriter, loc, returnValues);
946 } else {
947 cir::ReturnOp::create(rewriter, loc);
948 }
949 return mlir::success();
950 })
951 .Case<cir::GotoOp>([&](auto gotoOp) {
952 // Correct goto handling requires determining whether the goto
953 // branches out of the cleanup scope or stays within it.
954 // Although the goto necessarily exits the cleanup scope in the
955 // case where it is the only exit from the scope, it is left
956 // as unimplemented for now so that it can be generalized when
957 // multi-exit flattening is implemented.
958 cir::UnreachableOp::create(rewriter, loc);
959 return gotoOp.emitError(
960 "goto in cleanup scope is not yet implemented");
961 })
962 .Default([&](mlir::Operation *op) {
963 cir::UnreachableOp::create(rewriter, loc);
964 return op->emitError(
965 "unexpected exit operation in cleanup scope body");
966 });
967 }
968
969#ifndef NDEBUG
970 // Check that no block other than the last one in a region exits the region.
971 static bool regionExitsOnlyFromLastBlock(mlir::Region &region) {
972 for (mlir::Block &block : region) {
973 if (&block == &region.back())
974 continue;
975 bool expectedTerminator =
976 llvm::TypeSwitch<mlir::Operation *, bool>(block.getTerminator())
977 // It is theoretically possible to have a cleanup block with
978 // any of the following exits in non-final blocks, but we won't
979 // currently generate any CIR that does that, and being able to
980 // assume that it doesn't happen simplifies the implementation.
981 // If we ever need to handle this case, the code will need to
982 // be updated to handle it.
983 .Case<cir::YieldOp, cir::ReturnOp, cir::ResumeFlatOp,
984 cir::ContinueOp, cir::BreakOp, cir::GotoOp>(
985 [](auto) { return false; })
986 // We expect that call operations have not yet been rewritten
987 // as try_call operations. A call can unwind out of the cleanup
988 // scope, but we will be handling that during flattening. The
989 // only case where a try_call could be present inside an
990 // unflattened cleanup region is if the cleanup contained a
991 // nested try-catch region, and that isn't expected as of the
992 // time of this implementation. If it does, this could be
993 // updated to tolerate it.
994 .Case<cir::TryCallOp>([](auto) { return false; })
995 // Likewise, we don't expect to find an EH dispatch operation
996 // because we weren't expecting try-catch regions nested in the
997 // cleanup region.
998 .Case<cir::EhDispatchOp>([](auto) { return false; })
999 // In theory, it would be possible to have a flattened switch
1000 // operation that does not exit the cleanup region. For now,
1001 // that's not happening.
1002 .Case<cir::SwitchFlatOp>([](auto) { return false; })
1003 // These aren't expected either, but if they occur, they don't
1004 // exit the region, so that's OK.
1005 .Case<cir::UnreachableOp, cir::TrapOp>([](auto) { return true; })
1006 // Indirect branches are not expected.
1007 .Case<cir::IndirectBrOp>([](auto) { return false; })
1008 // We do expect branches, but we don't expect them to leave
1009 // the region.
1010 .Case<cir::BrOp>([&](cir::BrOp brOp) {
1011 assert(brOp.getDest()->getParent() == &region &&
1012 "branch destination is not in the region");
1013 return true;
1014 })
1015 .Case<cir::BrCondOp>([&](cir::BrCondOp brCondOp) {
1016 assert(brCondOp.getDestTrue()->getParent() == &region &&
1017 "branch destination is not in the region");
1018 assert(brCondOp.getDestFalse()->getParent() == &region &&
1019 "branch destination is not in the region");
1020 return true;
1021 })
1022 // What else could there be?
1023 .Default([](mlir::Operation *) -> bool {
1024 llvm_unreachable("unexpected terminator in cleanup region");
1025 });
1026 if (!expectedTerminator)
1027 return false;
1028 }
1029 return true;
1030 }
1031#endif
1032
1033 // Build the EH cleanup block structure by cloning the cleanup region. The
1034 // cloned entry block gets an !cir.eh_token argument and a cir.begin_cleanup
1035 // inserted at the top. All cir.yield terminators that might exit the cleanup
1036 // region are replaced with cir.end_cleanup + cir.resume.
1037 //
1038 // For a single-block cleanup region, this produces:
1039 //
1040 // ^eh_cleanup(%eh_token : !cir.eh_token):
1041 // %ct = cir.begin_cleanup %eh_token : !cir.eh_token -> !cir.cleanup_token
1042 // <cloned cleanup operations>
1043 // cir.end_cleanup %ct : !cir.cleanup_token
1044 // cir.resume %eh_token : !cir.eh_token
1045 //
1046 // For a multi-block cleanup region (e.g. containing a flattened cir.if),
1047 // the same wrapping is applied around the cloned block structure: the entry
1048 // block gets begin_cleanup and all exit blocks (those terminated by yield)
1049 // get end_cleanup + resume.
1050 //
1051 // If this cleanup scope is nested within a TryOp, the resume will be updated
1052 // to branch to the catch dispatch block of the enclosing try operation when
1053 // the TryOp is flattened.
  mlir::Block *buildEHCleanupBlocks(cir::CleanupScopeOp cleanupOp,
                                    mlir::Location loc,
                                    mlir::Block *insertBefore,
                                    mlir::PatternRewriter &rewriter) const {
    assert(regionExitsOnlyFromLastBlock(cleanupOp.getCleanupRegion()) &&
           "cleanup region has exits in non-final blocks");

    // Track the block before the insertion point so we can find the cloned
    // blocks after cloning. May be null if insertBefore is the region's
    // first block.
    mlir::Block *blockBeforeClone = insertBefore->getPrevNode();

    // Clone the entire cleanup region before insertBefore. The original
    // region is left intact for the normal-path inlining done later.
    rewriter.cloneRegionBefore(cleanupOp.getCleanupRegion(), insertBefore);

    // Find the first cloned block: it follows blockBeforeClone, or is the
    // parent region's front block when there was no preceding block.
    mlir::Block *clonedEntry = blockBeforeClone
                                   ? blockBeforeClone->getNextNode()
                                   : &insertBefore->getParent()->front();

    // Add the eh_token argument to the cloned entry block and insert
    // begin_cleanup at the top.
    auto ehTokenType = cir::EhTokenType::get(rewriter.getContext());
    mlir::Value ehToken = clonedEntry->addArgument(ehTokenType, loc);

    rewriter.setInsertionPointToStart(clonedEntry);
    auto beginCleanup = cir::BeginCleanupOp::create(rewriter, loc, ehToken);

    // Replace the yield terminator in the last cloned block (the one now
    // immediately preceding insertBefore) with end_cleanup + resume.
    mlir::Block *lastClonedBlock = insertBefore->getPrevNode();
    auto yieldOp =
        mlir::dyn_cast<cir::YieldOp>(lastClonedBlock->getTerminator());
    if (yieldOp) {
      rewriter.setInsertionPoint(yieldOp);
      cir::EndCleanupOp::create(rewriter, loc, beginCleanup.getCleanupToken());
      rewriter.replaceOpWithNewOp<cir::ResumeOp>(yieldOp, ehToken);
    } else {
      // NYI path: an error is emitted, but the cloned entry is still
      // returned so the IR stays structurally intact for diagnosis.
      cleanupOp->emitError("Not yet implemented: cleanup region terminated "
                           "with non-yield operation");
    }

    return clonedEntry;
  }
1097
1098 // Flatten a cleanup scope. The body region's exits branch to the cleanup
1099 // block, and the cleanup block branches to destination blocks whose contents
1100 // depend on the type of operation that exited the body region. Yield becomes
1101 // a branch to the block after the cleanup scope, break and continue are
1102 // preserved for later lowering by enclosing switch or loop, and return
1103 // is preserved as is.
1104 //
1105 // If there are multiple exits from the cleanup body, a destination slot and
1106 // switch dispatch are used to continue to the correct destination after the
1107 // cleanup is complete. A destination slot alloca is created at the function
1108 // entry block. Each exit operation is replaced by a store of its unique ID to
  // the destination slot and a branch to cleanup. The cleanup's terminating
  // yield is replaced by a branch to a dispatch block that loads the
  // destination slot and uses switch.flat to branch to the correct
  // destination.
1112 //
1113 // If the cleanup scope requires EH cleanup, any call operations in the body
1114 // that may throw are replaced with cir.try_call operations that unwind to an
1115 // EH cleanup block. The cleanup block(s) will be terminated with a cir.resume
1116 // operation. If this cleanup scope is enclosed by a try operation, the
  // flattening of the try operation will replace the cir.resume with
1118 // a branch to a catch dispatch block. Otherwise, the cir.resume operation
1119 // remains in place and will unwind to the caller.
  mlir::LogicalResult
  flattenCleanup(cir::CleanupScopeOp cleanupOp,
                 llvm::SmallVectorImpl<CleanupExit> &exits,
                 llvm::SmallVectorImpl<cir::CallOp> &callsToRewrite,
                 llvm::SmallVectorImpl<cir::ResumeOp> &resumeOpsToChain,
                 mlir::PatternRewriter &rewriter) const {
    mlir::Location loc = cleanupOp.getLoc();
    cir::CleanupKind cleanupKind = cleanupOp.getCleanupKind();
    // "Normal" cleanup runs on ordinary control-flow exits from the body;
    // "EH" cleanup runs only on the exception unwind path; "All" needs both.
    bool hasNormalCleanup = cleanupKind == cir::CleanupKind::Normal ||
                            cleanupKind == cir::CleanupKind::All;
    bool hasEHCleanup = cleanupKind == cir::CleanupKind::EH ||
                        cleanupKind == cir::CleanupKind::All;
    bool isMultiExit = exits.size() > 1;

    // Get references to region blocks before inlining.
    mlir::Block *bodyEntry = &cleanupOp.getBodyRegion().front();
    mlir::Block *cleanupEntry = &cleanupOp.getCleanupRegion().front();
    mlir::Block *cleanupExit = &cleanupOp.getCleanupRegion().back();
    assert(regionExitsOnlyFromLastBlock(cleanupOp.getCleanupRegion()) &&
           "cleanup region has exits in non-final blocks");
    auto cleanupYield = dyn_cast<cir::YieldOp>(cleanupExit->getTerminator());
    if (!cleanupYield) {
      return rewriter.notifyMatchFailure(cleanupOp,
                                         "Not yet implemented: cleanup region "
                                         "terminated with non-yield operation");
    }

    // For multiple exits from the body region, get or create a destination slot
    // at function entry. The slot is shared across all cleanup scopes in the
    // function. This is only needed if the cleanup scope requires normal
    // cleanup.
    cir::AllocaOp destSlot;
    if (isMultiExit && hasNormalCleanup) {
      auto funcOp = cleanupOp->getParentOfType<cir::FuncOp>();
      if (!funcOp)
        return cleanupOp->emitError("cleanup scope not inside a function");
      destSlot = getOrCreateCleanupDestSlot(funcOp, rewriter, loc);
    }

    // Split the current block to create the insertion point.
    mlir::Block *currentBlock = rewriter.getInsertionBlock();
    mlir::Block *continueBlock =
        rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());

    // Build EH cleanup blocks if needed. This must be done before inlining
    // the cleanup region since buildEHCleanupBlocks clones from it. The unwind
    // block is inserted before the EH cleanup entry so that the final layout
    // is: body -> normal cleanup -> exit -> unwind -> EH cleanup -> continue.
    // EH cleanup blocks are needed when there are throwing calls that need to
    // be rewritten to try_call, or when there are resume ops from
    // already-flattened inner cleanup scopes that need to chain through this
    // cleanup's EH handler.
    mlir::Block *unwindBlock = nullptr;
    mlir::Block *ehCleanupEntry = nullptr;
    if (hasEHCleanup &&
        (!callsToRewrite.empty() || !resumeOpsToChain.empty())) {
      ehCleanupEntry =
          buildEHCleanupBlocks(cleanupOp, loc, continueBlock, rewriter);
      // The unwind block is only needed when there are throwing calls that
      // need a shared unwind destination. Resume ops from inner cleanups
      // branch directly to the EH cleanup entry.
      if (!callsToRewrite.empty())
        unwindBlock = buildUnwindBlock(ehCleanupEntry, /*hasCleanup=*/true, loc,
                                       ehCleanupEntry, rewriter);
    }

    // All normal flow blocks are inserted before this point — either before
    // the unwind block (if it exists), or before the EH cleanup entry (if EH
    // cleanup exists but no unwind block is needed), or before the continue
    // block.
    mlir::Block *normalInsertPt =
        unwindBlock ? unwindBlock
                    : (ehCleanupEntry ? ehCleanupEntry : continueBlock);

    // Inline the body region.
    rewriter.inlineRegionBefore(cleanupOp.getBodyRegion(), normalInsertPt);

    // Inline the cleanup region for the normal cleanup path.
    if (hasNormalCleanup)
      rewriter.inlineRegionBefore(cleanupOp.getCleanupRegion(), normalInsertPt);

    // Branch from current block to body entry.
    rewriter.setInsertionPointToEnd(currentBlock);
    cir::BrOp::create(rewriter, loc, bodyEntry);

    // Handle normal exits.
    mlir::LogicalResult result = mlir::success();
    if (hasNormalCleanup) {
      // Create the exit/dispatch block (after cleanup, before continue).
      mlir::Block *exitBlock = rewriter.createBlock(normalInsertPt);

      // Rewrite the cleanup region's yield to branch to exit block.
      rewriter.setInsertionPoint(cleanupYield);
      rewriter.replaceOpWithNewOp<cir::BrOp>(cleanupYield, exitBlock);

      if (isMultiExit) {
        // Build the dispatch switch in the exit block.
        rewriter.setInsertionPointToEnd(exitBlock);

        // Load the destination slot value.
        auto slotValue = cir::LoadOp::create(
            rewriter, loc, destSlot, /*isDeref=*/false,
            /*isVolatile=*/false, /*alignment=*/mlir::IntegerAttr(),
            cir::SyncScopeKindAttr(), cir::MemOrderAttr());

        // Create destination blocks for each exit and collect switch case info.
        llvm::SmallVector<mlir::APInt, 8> caseValues;
        llvm::SmallVector<mlir::Block *, 8> caseDestinations;
        llvm::SmallVector<mlir::ValueRange, 8> caseOperands;
        cir::IntType s32Type =
            cir::IntType::get(rewriter.getContext(), 32, /*isSigned=*/true);

        for (const CleanupExit &exit : exits) {
          // Create a block for this destination.
          mlir::Block *destBlock = rewriter.createBlock(normalInsertPt);
          rewriter.setInsertionPointToEnd(destBlock);
          result =
              createExitTerminator(exit.exitOp, loc, continueBlock, rewriter);

          // Add to switch cases. The case value is the exit's unique ID.
          caseValues.push_back(
              llvm::APInt(32, static_cast<uint64_t>(exit.destinationId), true));
          caseDestinations.push_back(destBlock);
          caseOperands.push_back(mlir::ValueRange());

          // Replace the original exit op with: store dest ID, branch to
          // cleanup.
          rewriter.setInsertionPoint(exit.exitOp);
          auto destIdConst = cir::ConstantOp::create(
              rewriter, loc, cir::IntAttr::get(s32Type, exit.destinationId));
          cir::StoreOp::create(rewriter, loc, destIdConst, destSlot,
                               /*isVolatile=*/false,
                               /*alignment=*/mlir::IntegerAttr(),
                               cir::SyncScopeKindAttr(), cir::MemOrderAttr());
          rewriter.replaceOpWithNewOp<cir::BrOp>(exit.exitOp, cleanupEntry);

          // If the exit terminator creation failed, we're going to end up with
          // partially flattened code, but we'll also have reported an error so
          // that's OK. We need to finish out this function to keep the IR in a
          // valid state to help diagnose the error. This is a temporary
          // possibility during development. It shouldn't ever happen after the
          // implementation is complete.
          if (result.failed())
            break;
        }

        // Create the default destination (unreachable).
        mlir::Block *defaultBlock = rewriter.createBlock(normalInsertPt);
        rewriter.setInsertionPointToEnd(defaultBlock);
        cir::UnreachableOp::create(rewriter, loc);

        // Build the switch.flat operation in the exit block.
        rewriter.setInsertionPointToEnd(exitBlock);
        cir::SwitchFlatOp::create(rewriter, loc, slotValue, defaultBlock,
                                  mlir::ValueRange(), caseValues,
                                  caseDestinations, caseOperands);
      } else {
        // Single exit: put the appropriate terminator directly in the exit
        // block. No destination slot or dispatch switch is needed.
        rewriter.setInsertionPointToEnd(exitBlock);
        mlir::Operation *exitOp = exits[0].exitOp;
        result = createExitTerminator(exitOp, loc, continueBlock, rewriter);

        // Replace body exit with branch to cleanup entry.
        rewriter.setInsertionPoint(exitOp);
        rewriter.replaceOpWithNewOp<cir::BrOp>(exitOp, cleanupEntry);
      }
    } else {
      // EH-only cleanup: normal exits skip the cleanup entirely.
      // Replace yield exits with branches to the continue block.
      for (CleanupExit &exit : exits) {
        if (isa<cir::YieldOp>(exit.exitOp)) {
          rewriter.setInsertionPoint(exit.exitOp);
          rewriter.replaceOpWithNewOp<cir::BrOp>(exit.exitOp, continueBlock);
        }
        // Non-yield exits (break, continue, return) stay as-is since no normal
        // cleanup is needed.
      }
    }

    // Replace non-nothrow calls with try_call operations. All calls within
    // this cleanup scope share the same unwind destination.
    if (hasEHCleanup) {
      for (cir::CallOp callOp : callsToRewrite)
        replaceCallWithTryCall(callOp, unwindBlock, loc, rewriter);
    }

    // Chain inner EH cleanup resume ops to this cleanup's EH handler.
    // Each cir.resume from an already-flattened inner cleanup is replaced
    // with a branch to the outer EH cleanup entry, passing the eh_token
    // from the inner's begin_cleanup so that the same in-flight exception
    // flows through the outer cleanup before unwinding to the caller.
    if (ehCleanupEntry) {
      for (cir::ResumeOp resumeOp : resumeOpsToChain) {
        mlir::Value ehToken = resumeOp.getEhToken();
        rewriter.setInsertionPoint(resumeOp);
        rewriter.replaceOpWithNewOp<cir::BrOp>(
            resumeOp, mlir::ValueRange{ehToken}, ehCleanupEntry);
      }
    }

    // Erase the original cleanup scope op.
    rewriter.eraseOp(cleanupOp);

    return result;
  }
1326
1327 mlir::LogicalResult
1328 matchAndRewrite(cir::CleanupScopeOp cleanupOp,
1329 mlir::PatternRewriter &rewriter) const override {
1330 mlir::OpBuilder::InsertionGuard guard(rewriter);
1331
1332 // Nested cleanup scopes and try operations must be flattened before the
1333 // enclosing cleanup scope so that EH cleanup inside them is properly
1334 // handled. Fail the match so the pattern rewriter processes them first.
1335 bool hasNestedOps = cleanupOp.getBodyRegion()
1336 .walk([&](mlir::Operation *op) {
1337 if (isa<cir::CleanupScopeOp, cir::TryOp>(op))
1338 return mlir::WalkResult::interrupt();
1339 return mlir::WalkResult::advance();
1340 })
1341 .wasInterrupted();
1342 if (hasNestedOps)
1343 return mlir::failure();
1344
1345 cir::CleanupKind cleanupKind = cleanupOp.getCleanupKind();
1346
1347 // Throwing calls in the cleanup region of an EH-enabled cleanup scope
1348 // are not yet supported. Such calls would need their own EH handling
1349 // (e.g., terminate or nested cleanup) during the unwind path.
1350 if (cleanupKind != cir::CleanupKind::Normal) {
1351 llvm::SmallVector<cir::CallOp> cleanupThrowingCalls;
1352 collectThrowingCalls(cleanupOp.getCleanupRegion(), cleanupThrowingCalls);
1353 if (!cleanupThrowingCalls.empty())
1354 return cleanupOp->emitError(
1355 "throwing calls in cleanup region are not yet implemented");
1356 }
1357
1358 // Collect all exits from the body region.
1359 llvm::SmallVector<CleanupExit> exits;
1360 int nextId = 0;
1361 collectExits(cleanupOp.getBodyRegion(), exits, nextId);
1362
1363 assert(!exits.empty() && "cleanup scope body has no exit");
1364
1365 // Collect non-nothrow calls that need to be converted to try_call.
1366 // This is only needed for EH and All cleanup kinds, but the vector
1367 // will simply be empty for Normal cleanup.
1368 llvm::SmallVector<cir::CallOp> callsToRewrite;
1369 if (cleanupKind != cir::CleanupKind::Normal)
1370 collectThrowingCalls(cleanupOp.getBodyRegion(), callsToRewrite);
1371
1372 // Collect resume ops from already-flattened inner cleanup scopes that
1373 // need to chain through this cleanup's EH handler.
1374 llvm::SmallVector<cir::ResumeOp> resumeOpsToChain;
1375 if (cleanupKind != cir::CleanupKind::Normal)
1376 collectResumeOps(cleanupOp.getBodyRegion(), resumeOpsToChain);
1377
1378 return flattenCleanup(cleanupOp, exits, callsToRewrite, resumeOpsToChain,
1379 rewriter);
1380 }
1381};
1382
1383class CIRTryOpFlattening : public mlir::OpRewritePattern<cir::TryOp> {
1384public:
1385 using OpRewritePattern<cir::TryOp>::OpRewritePattern;
1386
1387 // Build the catch dispatch block with a cir.eh.dispatch operation.
1388 // The dispatch block receives an !cir.eh_token argument and dispatches
1389 // to the appropriate catch handler blocks based on exception types.
1390 mlir::Block *buildCatchDispatchBlock(
1391 cir::TryOp tryOp, mlir::ArrayAttr handlerTypes,
1392 llvm::SmallVectorImpl<mlir::Block *> &catchHandlerBlocks,
1393 mlir::Location loc, mlir::Block *insertBefore,
1394 mlir::PatternRewriter &rewriter) const {
1395 mlir::Block *dispatchBlock = rewriter.createBlock(insertBefore);
1396 auto ehTokenType = cir::EhTokenType::get(rewriter.getContext());
1397 mlir::Value ehToken = dispatchBlock->addArgument(ehTokenType, loc);
1398
1399 rewriter.setInsertionPointToEnd(dispatchBlock);
1400
1401 // Build the catch types and destinations for the dispatch.
1402 llvm::SmallVector<mlir::Attribute> catchTypeAttrs;
1403 llvm::SmallVector<mlir::Block *> catchDests;
1404 mlir::Block *defaultDest = nullptr;
1405 bool defaultIsCatchAll = false;
1406
1407 for (auto [typeAttr, handlerBlock] :
1408 llvm::zip(handlerTypes, catchHandlerBlocks)) {
1409 if (mlir::isa<cir::CatchAllAttr>(typeAttr)) {
1410 assert(!defaultDest && "multiple catch_all or unwind handlers");
1411 defaultDest = handlerBlock;
1412 defaultIsCatchAll = true;
1413 } else if (mlir::isa<cir::UnwindAttr>(typeAttr)) {
1414 assert(!defaultDest && "multiple catch_all or unwind handlers");
1415 defaultDest = handlerBlock;
1416 defaultIsCatchAll = false;
1417 } else {
1418 // This is a typed catch handler (GlobalViewAttr with type info).
1419 catchTypeAttrs.push_back(typeAttr);
1420 catchDests.push_back(handlerBlock);
1421 }
1422 }
1423
1424 assert(defaultDest && "dispatch must have a catch_all or unwind handler");
1425
1426 mlir::ArrayAttr catchTypesArrayAttr;
1427 if (!catchTypeAttrs.empty())
1428 catchTypesArrayAttr = rewriter.getArrayAttr(catchTypeAttrs);
1429
1430 cir::EhDispatchOp::create(rewriter, loc, ehToken, catchTypesArrayAttr,
1431 defaultIsCatchAll, defaultDest, catchDests);
1432
1433 return dispatchBlock;
1434 }
1435
1436 // Flatten a single catch handler region. Each handler region has an
1437 // !cir.eh_token argument and starts with cir.begin_catch, followed by
1438 // a cir.cleanup.scope containing the handler body (with cir.end_catch in
1439 // its cleanup region), and ending with cir.yield.
1440 //
1441 // After flattening, the handler region becomes a block that receives the
1442 // eh_token, calls begin_catch, runs the handler body inline, calls
1443 // end_catch, and branches to the continue block.
1444 //
1445 // The cleanup scope inside the catch handler is expected to have been
1446 // flattened before we get here, so what we see in the handler region is
1447 // already flat code with begin_catch at the top and end_catch in any place
1448 // that we would exit the catch handler. We just need to inline the region
1449 // and fix up terminators.
  mlir::Block *flattenCatchHandler(mlir::Region &handlerRegion,
                                   mlir::Block *continueBlock,
                                   mlir::Location loc,
                                   mlir::Block *insertBefore,
                                   mlir::PatternRewriter &rewriter) const {
    // The handler region entry block has the !cir.eh_token argument.
    mlir::Block *handlerEntry = &handlerRegion.front();

    // Inline the handler region before insertBefore.
    rewriter.inlineRegionBefore(handlerRegion, insertBefore);

    // Replace yield terminators in the handler with branches to continue.
    // The iterator range [handlerEntry, insertBefore) covers exactly the
    // blocks just inlined.
    for (mlir::Block &block : llvm::make_range(handlerEntry->getIterator(),
                                               insertBefore->getIterator())) {
      if (auto yieldOp = dyn_cast<cir::YieldOp>(block.getTerminator())) {
        // Verify that end_catch is the last non-branch operation before
        // this yield. After cleanup scope flattening, end_catch may be in
        // a predecessor block rather than immediately before the yield.
        // Walk back through the single-predecessor chain, verifying that
        // each intermediate block contains only a branch terminator, until
        // we find end_catch as the last non-terminator in some block.
        assert([&]() {
          // Check if end_catch immediately precedes the yield.
          if (mlir::Operation *prev = yieldOp->getPrevNode())
            return isa<cir::EndCatchOp>(prev);
          // The yield is alone in its block. Walk backward through
          // single-predecessor blocks that contain only a branch.
          mlir::Block *b = block.getSinglePredecessor();
          while (b) {
            mlir::Operation *term = b->getTerminator();
            if (mlir::Operation *prev = term->getPrevNode())
              return isa<cir::EndCatchOp>(prev);
            if (!isa<cir::BrOp>(term))
              return false;
            b = b->getSinglePredecessor();
          }
          return false;
        }() && "expected end_catch as last operation before yield "
               "in catch handler, with only branches in between");
        rewriter.setInsertionPoint(yieldOp);
        rewriter.replaceOpWithNewOp<cir::BrOp>(yieldOp, continueBlock);
      }
    }

    return handlerEntry;
  }
1496
1497 // Flatten an unwind handler region. The unwind region just contains a
1498 // cir.resume that continues unwinding. We inline it and leave the resume
1499 // in place. If this try op is nested inside an EH cleanup or another try op,
1500 // the enclosing op will rewrite the resume as a branch to its cleanup or
1501 // dispatch block when it is flattened. Otherwise, the resume will unwind to
1502 // the caller.
1503 mlir::Block *flattenUnwindHandler(mlir::Region &unwindRegion,
1504 mlir::Location loc,
1505 mlir::Block *insertBefore,
1506 mlir::PatternRewriter &rewriter) const {
1507 mlir::Block *unwindEntry = &unwindRegion.front();
1508 rewriter.inlineRegionBefore(unwindRegion, insertBefore);
1509 return unwindEntry;
1510 }
1511
  // Flatten a cir.try op: inline the try body and all handler regions into
  // the parent function region, build a catch dispatch block, rewrite
  // throwing calls into try_call ops targeting a shared unwind block, and
  // chain resume ops from inner cleanup scopes to the dispatch block.
  mlir::LogicalResult
  matchAndRewrite(cir::TryOp tryOp,
                  mlir::PatternRewriter &rewriter) const override {
    // Nested try ops and cleanup scopes must be flattened before the enclosing
    // try so that EH cleanup inside them is properly handled. Fail the match so
    // the pattern rewriter will process nested ops first.
    bool hasNestedOps =
        tryOp
            ->walk([&](mlir::Operation *op) {
              if (isa<cir::CleanupScopeOp, cir::TryOp>(op) && op != tryOp)
                return mlir::WalkResult::interrupt();
              return mlir::WalkResult::advance();
            })
            .wasInterrupted();
    if (hasNestedOps)
      return mlir::failure();

    mlir::OpBuilder::InsertionGuard guard(rewriter);
    mlir::Location loc = tryOp.getLoc();

    mlir::ArrayAttr handlerTypes = tryOp.getHandlerTypesAttr();
    mlir::MutableArrayRef<mlir::Region> handlerRegions =
        tryOp.getHandlerRegions();

    // Collect throwing calls in the try body.
    llvm::SmallVector<cir::CallOp> callsToRewrite;
    collectThrowingCalls(tryOp.getTryRegion(), callsToRewrite);

    // Collect resume ops from already-flattened cleanup scopes in the try body.
    llvm::SmallVector<cir::ResumeOp> resumeOpsToChain;
    collectResumeOps(tryOp.getTryRegion(), resumeOpsToChain);

    // Split the current block and inline the try body.
    mlir::Block *currentBlock = rewriter.getInsertionBlock();
    mlir::Block *continueBlock =
        rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());

    // Get references to try body blocks before inlining: inlineRegionBefore
    // moves the blocks out of the region, so front()/back() must be read
    // first.
    mlir::Block *bodyEntry = &tryOp.getTryRegion().front();
    mlir::Block *bodyExit = &tryOp.getTryRegion().back();

    // Inline the try body region before the continue block.
    rewriter.inlineRegionBefore(tryOp.getTryRegion(), continueBlock);

    // Branch from the current block to the body entry.
    rewriter.setInsertionPointToEnd(currentBlock);
    cir::BrOp::create(rewriter, loc, bodyEntry);

    // Replace the try body's yield terminator with a branch to continue.
    if (auto bodyYield = dyn_cast<cir::YieldOp>(bodyExit->getTerminator())) {
      rewriter.setInsertionPoint(bodyYield);
      rewriter.replaceOpWithNewOp<cir::BrOp>(bodyYield, continueBlock);
    }

    // If there are no handlers, we're done.
    if (!handlerTypes || handlerTypes.empty()) {
      rewriter.eraseOp(tryOp);
      return mlir::success();
    }

    // If there are no throwing calls and no resume ops from inner cleanup
    // scopes, exceptions cannot reach the catch handlers. Skip handler and
    // dispatch block creation — the handler regions will be dropped when
    // the try op is erased.
    if (callsToRewrite.empty() && resumeOpsToChain.empty()) {
      rewriter.eraseOp(tryOp);
      return mlir::success();
    }

    // Build the catch handler blocks.

    // First, flatten all handler regions and collect the entry blocks.
    llvm::SmallVector<mlir::Block *> catchHandlerBlocks;

    for (const auto &[idx, typeAttr] : llvm::enumerate(handlerTypes)) {
      mlir::Region &handlerRegion = handlerRegions[idx];

      // An UnwindAttr marks the unwind handler; everything else is a regular
      // catch handler.
      if (mlir::isa<cir::UnwindAttr>(typeAttr)) {
        mlir::Block *unwindEntry =
            flattenUnwindHandler(handlerRegion, loc, continueBlock, rewriter);
        catchHandlerBlocks.push_back(unwindEntry);
      } else {
        mlir::Block *handlerEntry = flattenCatchHandler(
            handlerRegion, continueBlock, loc, continueBlock, rewriter);
        catchHandlerBlocks.push_back(handlerEntry);
      }
    }

    // Build the catch dispatch block.
    mlir::Block *dispatchBlock =
        buildCatchDispatchBlock(tryOp, handlerTypes, catchHandlerBlocks, loc,
                                catchHandlerBlocks.front(), rewriter);

    // Build a block to be the unwind destination for throwing calls and
    // replace the calls with try_call ops. Note that the unwind block created
    // here is something different than the unwind handler that we may have
    // created above. The unwind handler continues unwinding after uncaught
    // exceptions. This is the block that will eventually become the landing
    // pad for invoke instructions.
    bool hasCleanup = tryOp.getCleanup();
    if (!callsToRewrite.empty()) {
      // Create a shared unwind block for all throwing calls.
      mlir::Block *unwindBlock = buildUnwindBlock(dispatchBlock, hasCleanup,
                                                  loc, dispatchBlock, rewriter);

      for (cir::CallOp callOp : callsToRewrite)
        replaceCallWithTryCall(callOp, unwindBlock, loc, rewriter);
    }

    // Chain resume ops from inner cleanup scopes.
    // Resume ops from already-flattened cleanup scopes within the try body
    // should branch to the catch dispatch block instead of unwinding directly.
    for (cir::ResumeOp resumeOp : resumeOpsToChain) {
      mlir::Value ehToken = resumeOp.getEhToken();
      rewriter.setInsertionPoint(resumeOp);
      rewriter.replaceOpWithNewOp<cir::BrOp>(
          resumeOp, mlir::ValueRange{ehToken}, dispatchBlock);
    }

    // Finally, erase the original try op.
    rewriter.eraseOp(tryOp);

    return mlir::success();
  }
1636};
1637
1638void populateFlattenCFGPatterns(RewritePatternSet &patterns) {
1639 patterns
1640 .add<CIRIfFlattening, CIRLoopOpInterfaceFlattening, CIRScopeOpFlattening,
1641 CIRSwitchOpFlattening, CIRTernaryOpFlattening,
1642 CIRCleanupScopeOpFlattening, CIRTryOpFlattening>(
1643 patterns.getContext());
1644}
1645
1646void CIRFlattenCFGPass::runOnOperation() {
1647 RewritePatternSet patterns(&getContext());
1648 populateFlattenCFGPatterns(patterns);
1649
1650 // Collect operations to apply patterns.
1651 llvm::SmallVector<Operation *, 16> ops;
1652 getOperation()->walk<mlir::WalkOrder::PostOrder>([&](Operation *op) {
1653 if (isa<IfOp, ScopeOp, SwitchOp, LoopOpInterface, TernaryOp, CleanupScopeOp,
1654 TryOp>(op))
1655 ops.push_back(op);
1656 });
1657
1658 // Apply patterns.
1659 if (applyOpPatternsGreedily(ops, std::move(patterns)).failed())
1660 signalPassFailure();
1661}
1662
1663} // namespace
1664
1665namespace mlir {
1666
1667std::unique_ptr<Pass> createCIRFlattenCFGPass() {
1668 return std::make_unique<CIRFlattenCFGPass>();
1669}
1670
1671} // namespace mlir
__device__ __2f16 b
llvm::APInt APInt
Definition FixedPoint.h:19
ASTEdit insertBefore(RangeSelector S, TextGenerator Replacement)
Inserts Replacement before S, leaving the source selected by \S unchanged.
unsigned long uint64_t
std::unique_ptr< Pass > createCIRFlattenCFGPass()
int const char * function
Definition c++config.h:31
float __ovld __cnfn step(float, float)
Returns 0.0 if x < edge, otherwise it returns 1.0.
static bool stackSaveOp()