#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/TypeSwitch.h"

#include <cassert>

#define GEN_PASS_DEF_CIRFLATTENCFG
#include "clang/CIR/Dialect/Passes.h.inc"
39void lowerTerminator(mlir::Operation *op, mlir::Block *dest,
40 mlir::PatternRewriter &rewriter) {
41 assert(op->hasTrait<mlir::OpTrait::IsTerminator>() &&
"not a terminator");
42 mlir::OpBuilder::InsertionGuard guard(rewriter);
43 rewriter.setInsertionPoint(op);
44 rewriter.replaceOpWithNewOp<cir::BrOp>(op, dest);
49template <
typename... Ops>
50void walkRegionSkipping(
52 mlir::function_ref<mlir::WalkResult(mlir::Operation *)> callback) {
53 region.walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *op) {
55 return mlir::WalkResult::skip();
60struct CIRFlattenCFGPass :
public impl::CIRFlattenCFGBase<CIRFlattenCFGPass> {
62 CIRFlattenCFGPass() =
default;
63 void runOnOperation()
override;
66struct CIRIfFlattening :
public mlir::OpRewritePattern<cir::IfOp> {
67 using OpRewritePattern<IfOp>::OpRewritePattern;
70 matchAndRewrite(cir::IfOp ifOp,
71 mlir::PatternRewriter &rewriter)
const override {
72 mlir::OpBuilder::InsertionGuard guard(rewriter);
73 mlir::Location loc = ifOp.getLoc();
74 bool emptyElse = ifOp.getElseRegion().empty();
75 mlir::Block *currentBlock = rewriter.getInsertionBlock();
76 mlir::Block *remainingOpsBlock =
77 rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
78 mlir::Block *continueBlock;
79 if (ifOp->getResults().empty())
80 continueBlock = remainingOpsBlock;
82 llvm_unreachable(
"NYI");
85 mlir::Block *thenBeforeBody = &ifOp.getThenRegion().front();
86 mlir::Block *thenAfterBody = &ifOp.getThenRegion().back();
87 rewriter.inlineRegionBefore(ifOp.getThenRegion(), continueBlock);
89 rewriter.setInsertionPointToEnd(thenAfterBody);
90 if (
auto thenYieldOp =
91 dyn_cast<cir::YieldOp>(thenAfterBody->getTerminator())) {
92 rewriter.replaceOpWithNewOp<cir::BrOp>(thenYieldOp, thenYieldOp.getArgs(),
96 rewriter.setInsertionPointToEnd(continueBlock);
99 mlir::Block *elseBeforeBody =
nullptr;
100 mlir::Block *elseAfterBody =
nullptr;
102 elseBeforeBody = &ifOp.getElseRegion().front();
103 elseAfterBody = &ifOp.getElseRegion().back();
104 rewriter.inlineRegionBefore(ifOp.getElseRegion(), continueBlock);
106 elseBeforeBody = elseAfterBody = continueBlock;
109 rewriter.setInsertionPointToEnd(currentBlock);
110 cir::BrCondOp::create(rewriter, loc, ifOp.getCondition(), thenBeforeBody,
114 rewriter.setInsertionPointToEnd(elseAfterBody);
115 if (
auto elseYieldOP =
116 dyn_cast<cir::YieldOp>(elseAfterBody->getTerminator())) {
117 rewriter.replaceOpWithNewOp<cir::BrOp>(
118 elseYieldOP, elseYieldOP.getArgs(), continueBlock);
122 rewriter.replaceOp(ifOp, continueBlock->getArguments());
123 return mlir::success();
127class CIRScopeOpFlattening :
public mlir::OpRewritePattern<cir::ScopeOp> {
129 using OpRewritePattern<cir::ScopeOp>::OpRewritePattern;
132 matchAndRewrite(cir::ScopeOp scopeOp,
133 mlir::PatternRewriter &rewriter)
const override {
134 mlir::OpBuilder::InsertionGuard guard(rewriter);
135 mlir::Location loc = scopeOp.getLoc();
143 if (scopeOp.isEmpty()) {
144 rewriter.eraseOp(scopeOp);
145 return mlir::success();
150 mlir::Block *currentBlock = rewriter.getInsertionBlock();
151 mlir::Block *continueBlock =
152 rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
153 if (scopeOp.getNumResults() > 0)
154 continueBlock->addArguments(scopeOp.getResultTypes(), loc);
157 mlir::Block *beforeBody = &scopeOp.getScopeRegion().front();
158 mlir::Block *afterBody = &scopeOp.getScopeRegion().back();
159 rewriter.inlineRegionBefore(scopeOp.getScopeRegion(), continueBlock);
162 rewriter.setInsertionPointToEnd(currentBlock);
164 cir::BrOp::create(rewriter, loc, mlir::ValueRange(), beforeBody);
168 rewriter.setInsertionPointToEnd(afterBody);
169 if (
auto yieldOp = dyn_cast<cir::YieldOp>(afterBody->getTerminator())) {
170 rewriter.replaceOpWithNewOp<cir::BrOp>(yieldOp, yieldOp.getArgs(),
175 rewriter.replaceOp(scopeOp, continueBlock->getArguments());
177 return mlir::success();
// Flattens `cir.switch` into a `cir.switch.flat` plus explicit CFG.
// NOTE(review): this block carries extraction damage — original file line
// numbers fused into the text, dropped lines (numbering gaps), unbalanced
// braces, and "®" mojibake where "&reg" of "&region" was eaten. Code is kept
// byte-identical here; confirm every detail against the upstream file before
// relying on it.
181class CIRSwitchOpFlattening :
public mlir::OpRewritePattern<cir::SwitchOp> {
183 using OpRewritePattern<cir::SwitchOp>::OpRewritePattern;
// Replaces a case-body `cir.yield` with a branch to `destination`,
// forwarding the yield's operands.
185 inline void rewriteYieldOp(mlir::PatternRewriter &rewriter,
186 cir::YieldOp yieldOp,
187 mlir::Block *destination)
const {
188 rewriter.setInsertionPoint(yieldOp);
189 rewriter.replaceOpWithNewOp<cir::BrOp>(yieldOp, yieldOp.getOperands(),
// Emits an unsigned range check `(cond - lower) <= (upper - lower)` and a
// conditional branch: hit -> rangeDestination, miss -> (presumably) the
// default destination. Returns the block holding the comparison.
194 Block *condBrToRangeDestination(cir::SwitchOp op,
195 mlir::PatternRewriter &rewriter,
196 mlir::Block *rangeDestination,
197 mlir::Block *defaultDestination,
198 const APInt &lowerBound,
199 const APInt &upperBound)
const {
200 assert(lowerBound.sle(upperBound) &&
"Invalid range");
201 mlir::Block *resBlock = rewriter.createBlock(defaultDestination);
// 32-bit signed type for the arithmetic, unsigned for the wrap-around trick.
202 cir::IntType sIntType = cir::IntType::get(op.getContext(), 32,
true);
203 cir::IntType uIntType = cir::IntType::get(op.getContext(), 32,
false);
205 cir::ConstantOp rangeLength = cir::ConstantOp::create(
206 rewriter, op.getLoc(),
207 cir::IntAttr::get(sIntType, upperBound - lowerBound));
209 cir::ConstantOp lowerBoundValue = cir::ConstantOp::create(
210 rewriter, op.getLoc(), cir::IntAttr::get(sIntType, lowerBound));
211 mlir::Value diffValue = cir::SubOp::create(
212 rewriter, op.getLoc(), op.getCondition(), lowerBoundValue);
// Casting to unsigned makes values below lowerBound wrap above the range
// length, so a single unsigned <= covers both bounds.
215 cir::CastOp uDiffValue = cir::CastOp::create(
216 rewriter, op.getLoc(), uIntType, CastKind::integral, diffValue);
217 cir::CastOp uRangeLength = cir::CastOp::create(
218 rewriter, op.getLoc(), uIntType, CastKind::integral, rangeLength);
220 cir::CmpOp cmpResult = cir::CmpOp::create(
221 rewriter, op.getLoc(), cir::CmpOpKind::le, uDiffValue, uRangeLength);
222 cir::BrCondOp::create(rewriter, op.getLoc(), cmpResult, rangeDestination,
228 matchAndRewrite(cir::SwitchOp op,
229 mlir::PatternRewriter &rewriter)
const override {
// Bail out while any cir.cleanup_scope remains nested inside: those must be
// flattened first.
233 bool hasNestedCleanup = op->walk([&](cir::CleanupScopeOp) {
234 return mlir::WalkResult::interrupt();
236 if (hasNestedCleanup)
237 return mlir::failure();
239 llvm::SmallVector<CaseOp> cases;
240 op.collectCases(cases);
// (A guard for the empty-cases fast path appears to be elided here.)
244 rewriter.eraseOp(op);
245 return mlir::success();
// The exit block holds everything after the switch.
249 mlir::Block *exitBlock = rewriter.splitBlock(
250 rewriter.getBlock(), op->getNextNode()->getIterator());
// Remember the switch-level yield (if any) so it can be routed to the exit.
263 cir::YieldOp switchYield =
nullptr;
265 for (mlir::Block &block :
266 llvm::make_early_inc_range(op.getBody().getBlocks()))
267 if (
auto yieldOp = dyn_cast<cir::YieldOp>(block.getTerminator()))
268 switchYield = yieldOp;
270 assert(!op.getBody().empty());
271 mlir::Block *originalBlock = op->getBlock();
272 mlir::Block *swopBlock =
273 rewriter.splitBlock(originalBlock, op->getIterator());
274 rewriter.inlineRegionBefore(op.getBody(), exitBlock);
277 rewriteYieldOp(rewriter, switchYield, exitBlock);
279 rewriter.setInsertionPointToEnd(originalBlock);
280 cir::BrOp::create(rewriter, op.getLoc(), swopBlock);
// Accumulators for the eventual cir.switch.flat operands.
285 llvm::SmallVector<mlir::APInt, 8> caseValues;
286 llvm::SmallVector<mlir::Block *, 8> caseDestinations;
287 llvm::SmallVector<mlir::ValueRange, 8> caseOperands;
289 llvm::SmallVector<std::pair<APInt, APInt>> rangeValues;
290 llvm::SmallVector<mlir::Block *> rangeDestinations;
291 llvm::SmallVector<mlir::ValueRange> rangeOperands;
// Until a Default case is seen, falling off the switch goes to the exit.
294 mlir::Block *defaultDestination = exitBlock;
295 mlir::ValueRange defaultOperands = exitBlock->getArguments();
298 for (cir::CaseOp caseOp : cases) {
299 mlir::Region &region = caseOp.getCaseRegion();
// Record each case kind's destination(s); `break`s between kinds appear to
// have been lost to extraction.
302 switch (caseOp.getKind()) {
303 case cir::CaseOpKind::Default:
304 defaultDestination = &region.front();
305 defaultOperands = defaultDestination->getArguments();
307 case cir::CaseOpKind::Range:
308 assert(caseOp.getValue().size() == 2 &&
309 "Case range should have 2 case value");
310 rangeValues.push_back(
311 {cast<cir::IntAttr>(caseOp.getValue()[0]).getValue(),
312 cast<cir::IntAttr>(caseOp.getValue()[1]).getValue()});
313 rangeDestinations.push_back(&region.front());
314 rangeOperands.push_back(rangeDestinations.back()->getArguments());
316 case cir::CaseOpKind::Anyof:
317 case cir::CaseOpKind::Equal:
319 for (
const mlir::Attribute &value : caseOp.getValue()) {
320 caseValues.push_back(cast<cir::IntAttr>(value).getValue());
321 caseDestinations.push_back(&region.front());
322 caseOperands.push_back(caseDestinations.back()->getArguments());
// Rewire `break`s inside this case to the exit block, skipping breaks that
// belong to nested loops/switches.
328 walkRegionSkipping<cir::LoopOpInterface, cir::SwitchOp>(
329 region, [&](mlir::Operation *op) {
330 if (!isa<cir::BreakOp>(op))
331 return mlir::WalkResult::advance();
333 lowerTerminator(op, exitBlock, rewriter);
334 return mlir::WalkResult::skip();
// A yield at the end of a case falls through to the next case op.
338 for (mlir::Block &blk : region.getBlocks()) {
339 if (blk.getNumSuccessors())
342 if (
auto yieldOp = dyn_cast<cir::YieldOp>(blk.getTerminator())) {
343 mlir::Operation *nextOp = caseOp->getNextNode();
344 assert(nextOp &&
"caseOp is not expected to be the last op");
345 mlir::Block *oldBlock = nextOp->getBlock();
346 mlir::Block *newBlock =
347 rewriter.splitBlock(oldBlock, nextOp->getIterator());
348 rewriter.setInsertionPointToEnd(oldBlock);
349 cir::BrOp::create(rewriter, nextOp->getLoc(), mlir::ValueRange(),
351 rewriteYieldOp(rewriter, yieldOp, newBlock);
// Inline the case region in place of the case op.
355 mlir::Block *oldBlock = caseOp->getBlock();
356 mlir::Block *newBlock =
357 rewriter.splitBlock(oldBlock, caseOp->getIterator());
359 mlir::Block &entryBlock = caseOp.getCaseRegion().front();
360 rewriter.inlineRegionBefore(caseOp.getCaseRegion(), newBlock);
363 rewriter.setInsertionPointToEnd(oldBlock);
364 cir::BrOp::create(rewriter, caseOp.getLoc(), &entryBlock);
// Drop the now-empty case ops and any unreachable blocks left behind.
368 for (cir::CaseOp caseOp : cases) {
369 mlir::Block *caseBlock = caseOp->getBlock();
372 if (caseBlock->hasNoPredecessors())
373 rewriter.eraseBlock(caseBlock);
375 rewriter.eraseOp(caseOp);
// Lower ranges: small ranges are exploded into individual case values,
// larger ones get a compare-and-branch chained before the default.
378 for (
auto [rangeVal, operand, destination] :
379 llvm::zip(rangeValues, rangeOperands, rangeDestinations)) {
380 APInt lowerBound = rangeVal.first;
381 APInt upperBound = rangeVal.second;
384 if (lowerBound.sgt(upperBound))
389 constexpr int kSmallRangeThreshold = 64;
390 if ((upperBound - lowerBound)
391 .ult(llvm::APInt(32, kSmallRangeThreshold))) {
392 for (APInt iValue = lowerBound; iValue.sle(upperBound); ++iValue) {
393 caseValues.push_back(iValue);
394 caseOperands.push_back(operand);
395 caseDestinations.push_back(destination);
// Large range: the range-check block becomes the new default destination
// (assignment of its result appears elided here).
401 condBrToRangeDestination(op, rewriter, destination,
402 defaultDestination, lowerBound, upperBound);
403 defaultOperands = operand;
// Finally replace the switch with its flat form.
407 rewriter.setInsertionPoint(op);
408 rewriter.replaceOpWithNewOp<cir::SwitchFlatOp>(
409 op, op.getCondition(), defaultDestination, defaultOperands, caseValues,
410 caseDestinations, caseOperands);
412 return mlir::success();
// Flattens any op implementing cir::LoopOpInterface (for/while/do) into
// explicit CFG: cond/body/step regions are inlined, `cir.condition` becomes a
// `cir.brcond`, `continue` branches to step (or cond), `break` to the exit.
// NOTE(review): extraction damage — fused line numbers, dropped lines (e.g.
// the declaration of `exit`, used below, is missing), unbalanced braces.
// Kept byte-identical; verify against upstream before relying on details.
416class CIRLoopOpInterfaceFlattening
417 :
public mlir::OpInterfaceRewritePattern<cir::LoopOpInterface> {
419 using mlir::OpInterfaceRewritePattern<
420 cir::LoopOpInterface>::OpInterfaceRewritePattern;
// Replaces `cir.condition` with a conditional branch: true -> body,
// false -> (presumably the elided `exit` parameter).
422 inline void lowerConditionOp(cir::ConditionOp op, mlir::Block *body,
424 mlir::PatternRewriter &rewriter)
const {
425 mlir::OpBuilder::InsertionGuard guard(rewriter);
426 rewriter.setInsertionPoint(op);
427 rewriter.replaceOpWithNewOp<cir::BrCondOp>(op, op.getCondition(), body,
432 matchAndRewrite(cir::LoopOpInterface op,
433 mlir::PatternRewriter &rewriter)
const final {
// Bail out while nested cleanup scopes remain; they are flattened first.
437 bool hasNestedCleanup = op->walk([&](cir::CleanupScopeOp) {
438 return mlir::WalkResult::interrupt();
440 if (hasNestedCleanup)
441 return mlir::failure();
// Split off the exit block; the `exit` binding itself appears elided.
444 mlir::Block *entry = rewriter.getInsertionBlock();
446 rewriter.splitBlock(entry, rewriter.getInsertionPoint());
447 mlir::Block *cond = &op.getCond().front();
448 mlir::Block *body = &op.getBody().front();
// `step` is null when the loop has no step region (while/do-while).
450 (op.maybeGetStep() ? &op.maybeGetStep()->front() :
nullptr);
453 rewriter.setInsertionPointToEnd(entry);
454 cir::BrOp::create(rewriter, op.getLoc(), &op.getEntry().front());
457 auto conditionOp = cast<cir::ConditionOp>(cond->getTerminator());
458 lowerConditionOp(conditionOp, body, exit, rewriter);
// `continue` targets the step region when present, else the condition.
465 mlir::Block *dest = (
step ?
step : cond);
466 op.walkBodySkippingNestedLoops([&](mlir::Operation *op) {
467 if (!isa<cir::ContinueOp>(op))
468 return mlir::WalkResult::advance();
470 lowerTerminator(op, dest, rewriter);
471 return mlir::WalkResult::skip();
// `break` targets the exit block, skipping nested loops/switches.
475 walkRegionSkipping<cir::LoopOpInterface, cir::SwitchOp>(
476 op.getBody(), [&](mlir::Operation *op) {
477 if (!isa<cir::BreakOp>(op))
478 return mlir::WalkResult::advance();
480 lowerTerminator(op, exit, rewriter);
481 return mlir::WalkResult::skip();
// Body yields loop back to step/cond.
485 for (mlir::Block &blk : op.getBody().getBlocks()) {
486 auto bodyYield = dyn_cast<cir::YieldOp>(blk.getTerminator());
488 lowerTerminator(bodyYield, (
step ?
step : cond), rewriter);
// The step region's yield loops back to the condition.
493 lowerTerminator(cast<cir::YieldOp>(
step->getTerminator()), cond,
// Inline all regions before the exit block and erase the loop op.
497 rewriter.inlineRegionBefore(op.getCond(), exit);
498 rewriter.inlineRegionBefore(op.getBody(), exit);
500 rewriter.inlineRegionBefore(*op.maybeGetStep(), exit);
502 rewriter.eraseOp(op);
503 return mlir::success();
507class CIRTernaryOpFlattening :
public mlir::OpRewritePattern<cir::TernaryOp> {
509 using OpRewritePattern<cir::TernaryOp>::OpRewritePattern;
512 matchAndRewrite(cir::TernaryOp op,
513 mlir::PatternRewriter &rewriter)
const override {
514 Location loc = op->getLoc();
515 Block *condBlock = rewriter.getInsertionBlock();
516 Block::iterator opPosition = rewriter.getInsertionPoint();
517 Block *remainingOpsBlock = rewriter.splitBlock(condBlock, opPosition);
518 llvm::SmallVector<mlir::Location, 2> locs;
521 if (op->getResultTypes().size())
523 Block *continueBlock =
524 rewriter.createBlock(remainingOpsBlock, op->getResultTypes(), locs);
525 cir::BrOp::create(rewriter, loc, remainingOpsBlock);
527 Region &trueRegion = op.getTrueRegion();
528 Block *trueBlock = &trueRegion.front();
529 mlir::Operation *trueTerminator = trueRegion.back().getTerminator();
530 rewriter.setInsertionPointToEnd(&trueRegion.back());
533 if (
auto trueYieldOp = dyn_cast<cir::YieldOp>(trueTerminator)) {
534 rewriter.replaceOpWithNewOp<cir::BrOp>(trueYieldOp, trueYieldOp.getArgs(),
536 }
else if (isa<cir::UnreachableOp>(trueTerminator)) {
539 trueTerminator->emitError(
"unexpected terminator in ternary true region, "
540 "expected yield or unreachable, got: ")
541 << trueTerminator->getName();
542 return mlir::failure();
544 rewriter.inlineRegionBefore(trueRegion, continueBlock);
546 Block *falseBlock = continueBlock;
547 Region &falseRegion = op.getFalseRegion();
549 falseBlock = &falseRegion.front();
550 mlir::Operation *falseTerminator = falseRegion.back().getTerminator();
551 rewriter.setInsertionPointToEnd(&falseRegion.back());
554 if (
auto falseYieldOp = dyn_cast<cir::YieldOp>(falseTerminator)) {
555 rewriter.replaceOpWithNewOp<cir::BrOp>(
556 falseYieldOp, falseYieldOp.getArgs(), continueBlock);
557 }
else if (isa<cir::UnreachableOp>(falseTerminator)) {
560 falseTerminator->emitError(
"unexpected terminator in ternary false "
561 "region, expected yield or unreachable, got: ")
562 << falseTerminator->getName();
563 return mlir::failure();
565 rewriter.inlineRegionBefore(falseRegion, continueBlock);
567 rewriter.setInsertionPointToEnd(condBlock);
568 cir::BrCondOp::create(rewriter, loc, op.getCond(), trueBlock, falseBlock);
570 rewriter.replaceOp(op, continueBlock->getArguments());
573 return mlir::success();
580static cir::AllocaOp getOrCreateCleanupDestSlot(cir::FuncOp funcOp,
581 mlir::PatternRewriter &rewriter,
582 mlir::Location loc) {
583 mlir::Block &entryBlock = funcOp.getBody().front();
586 auto it = llvm::find_if(entryBlock, [](
auto &op) {
587 return mlir::isa<AllocaOp>(&op) &&
588 mlir::cast<AllocaOp>(&op).getCleanupDestSlot();
590 if (it != entryBlock.end())
591 return mlir::cast<cir::AllocaOp>(*it);
594 mlir::OpBuilder::InsertionGuard guard(rewriter);
595 rewriter.setInsertionPointToStart(&entryBlock);
596 cir::IntType s32Type =
597 cir::IntType::get(rewriter.getContext(), 32,
true);
598 cir::PointerType ptrToS32Type = cir::PointerType::get(s32Type);
600 uint64_t alignment = dataLayout.getAlignment(s32Type,
true).value();
601 auto allocaOp = cir::AllocaOp::create(
602 rewriter, loc, ptrToS32Type, s32Type,
"__cleanup_dest_slot",
603 rewriter.getI64IntegerAttr(alignment));
604 allocaOp.setCleanupDestSlot(
true);
616collectThrowingCalls(mlir::Region ®ion,
618 region.walk([&](cir::CallOp callOp) {
619 if (!callOp.getNothrow())
620 callsToRewrite.push_back(callOp);
630static void collectResumeOps(mlir::Region ®ion,
632 region.walk([&](cir::ResumeOp resumeOp) { resumeOps.push_back(resumeOp); });
637static void replaceCallWithTryCall(cir::CallOp callOp, mlir::Block *unwindDest,
639 mlir::PatternRewriter &rewriter) {
640 mlir::Block *callBlock = callOp->getBlock();
642 assert(!callOp.getNothrow() &&
"call is not expected to throw");
646 mlir::Block *normalDest =
647 rewriter.splitBlock(callBlock, std::next(callOp->getIterator()));
651 rewriter.setInsertionPoint(callOp);
652 cir::TryCallOp tryCallOp;
653 if (callOp.isIndirect()) {
654 mlir::Value indTarget = callOp.getIndirectCall();
655 auto ptrTy = mlir::cast<cir::PointerType>(indTarget.getType());
656 auto resTy = mlir::cast<cir::FuncType>(ptrTy.getPointee());
658 cir::TryCallOp::create(rewriter, loc, indTarget, resTy, normalDest,
659 unwindDest, callOp.getArgOperands());
661 mlir::Type resType = callOp->getNumResults() > 0
662 ? callOp->getResult(0).getType()
665 cir::TryCallOp::create(rewriter, loc, callOp.getCalleeAttr(), resType,
666 normalDest, unwindDest, callOp.getArgOperands());
670 if (callOp->getNumResults() > 0)
671 callOp->getResult(0).replaceAllUsesWith(tryCallOp.getResult());
673 rewriter.eraseOp(callOp);
679static mlir::Block *buildUnwindBlock(mlir::Block *dest,
bool hasCleanup,
681 mlir::Block *insertBefore,
682 mlir::PatternRewriter &rewriter) {
683 mlir::Block *unwindBlock = rewriter.createBlock(insertBefore);
684 rewriter.setInsertionPointToEnd(unwindBlock);
686 cir::EhInitiateOp::create(rewriter, loc, hasCleanup);
687 cir::BrOp::create(rewriter, loc, mlir::ValueRange{ehInitiate.getEhToken()},
// Flattens `cir.cleanup_scope`: the body region is inlined, each exit path
// out of the body is routed through (a clone of) the cleanup region, and —
// for EH cleanups — throwing calls become try-calls unwinding into a cloned
// cleanup path.
// NOTE(review): extraction damage throughout this class — fused original
// line numbers, dropped lines, unbalanced braces. Kept byte-identical.
692class CIRCleanupScopeOpFlattening
693 :
public mlir::OpRewritePattern<cir::CleanupScopeOp> {
695 using OpRewritePattern<cir::CleanupScopeOp>::OpRewritePattern;
// CleanupExit: records one operation that leaves the cleanup body (yield,
// break, continue, return, goto) together with the integer id stored in the
// cleanup-dest slot to select the corresponding post-cleanup destination.
700 mlir::Operation *exitOp;
706 CleanupExit(mlir::Operation *op,
int id) : exitOp(op), destinationId(id) {}
// Collects every operation that exits `cleanupBodyRegion` (yields of the
// scope itself plus break/continue/return/goto that escape it), assigning
// each a fresh destination id via `nextId` (third parameter appears elided).
// Nested loops/switches/cleanup scopes are handled by dedicated lambdas so
// that control flow they fully contain (e.g. their own break) is skipped.
725 void collectExits(mlir::Region &cleanupBodyRegion,
726 llvm::SmallVectorImpl<CleanupExit> &exits,
// Top-level blocks: the scope's own yields are exits.
731 for (mlir::Block &block : cleanupBodyRegion) {
732 auto *terminator = block.getTerminator();
733 if (isa<cir::YieldOp>(terminator))
734 exits.emplace_back(terminator, nextId++);
// Inside a nested loop only return/goto can escape the cleanup scope.
741 auto collectExitsInLoop = [&](mlir::Operation *loopOp) {
742 loopOp->walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *nestedOp) {
743 if (isa<cir::ReturnOp, cir::GotoOp>(nestedOp))
744 exits.emplace_back(nestedOp, nextId++);
745 return mlir::WalkResult::advance();
// Forward declaration: needed because switch/cleanup collection is mutually
// recursive.
750 std::function<void(mlir::Region &,
bool)> collectExitsInCleanup;
// Inside a nested switch, break is consumed by the switch itself; only
// return/goto/continue escape, and nested scopes recurse.
755 collectExitsInSwitch = [&](mlir::Operation *switchOp) {
756 switchOp->walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *nestedOp) {
757 if (isa<cir::CleanupScopeOp>(nestedOp)) {
760 collectExitsInCleanup(
761 cast<cir::CleanupScopeOp>(nestedOp).getBodyRegion(),
763 return mlir::WalkResult::skip();
764 }
else if (isa<cir::LoopOpInterface>(nestedOp)) {
765 collectExitsInLoop(nestedOp);
766 return mlir::WalkResult::skip();
767 }
else if (isa<cir::ReturnOp, cir::GotoOp, cir::ContinueOp>(nestedOp)) {
768 exits.emplace_back(nestedOp, nextId++);
770 return mlir::WalkResult::advance();
// Main recursive collector; `ignoreBreak` suppresses break collection when
// the break is already consumed by an enclosing construct.
777 collectExitsInCleanup = [&](mlir::Region &region,
bool ignoreBreak) {
778 region.walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *op) {
785 if (!ignoreBreak && isa<cir::BreakOp>(op)) {
786 exits.emplace_back(op, nextId++);
787 }
else if (isa<cir::ContinueOp, cir::ReturnOp, cir::GotoOp>(op)) {
788 exits.emplace_back(op, nextId++);
789 }
else if (isa<cir::CleanupScopeOp>(op)) {
791 collectExitsInCleanup(cast<cir::CleanupScopeOp>(op).getBodyRegion(),
793 return mlir::WalkResult::skip();
794 }
else if (isa<cir::LoopOpInterface>(op)) {
798 collectExitsInLoop(op);
799 return mlir::WalkResult::skip();
800 }
else if (isa<cir::SwitchOp>(op)) {
804 collectExitsInSwitch(op);
805 return mlir::WalkResult::skip();
807 return mlir::WalkResult::advance();
// Kick off the recursion at the scope's own body.
812 collectExitsInCleanup(cleanupBodyRegion,
false);
// Decides whether a return operand can simply be moved (sunk) past the
// cleanup code instead of being spilled to a temporary alloca. Only a
// single-use cir.const, or a cir.load from a function-entry alloca, defined
// in the return's own block, qualifies. (Early `return false/true` lines
// appear elided by extraction.)
818 static bool shouldSinkReturnOperand(mlir::Value operand,
819 cir::ReturnOp returnOp) {
821 mlir::Operation *defOp = operand.getDefiningOp();
// Only constants and loads are candidates.
827 if (!mlir::isa<cir::ConstantOp, cir::LoadOp>(defOp))
831 if (!operand.hasOneUse())
835 if (defOp->getBlock() != returnOp->getBlock())
// Loads are safe to sink only from stable (entry-block alloca) storage —
// the cleanup code could otherwise overwrite the loaded memory.
838 if (
auto loadOp = mlir::dyn_cast<cir::LoadOp>(defOp)) {
840 mlir::Value ptr = loadOp.getAddr();
841 auto funcOp = returnOp->getParentOfType<cir::FuncOp>();
842 assert(funcOp &&
"Return op has no function parent?");
843 mlir::Block &funcEntryBlock = funcOp.getBody().front();
847 mlir::dyn_cast_if_present<cir::AllocaOp>(ptr.getDefiningOp()))
848 return allocaOp->getBlock() == &funcEntryBlock;
// Remaining candidate must be a constant, which is always sinkable.
854 assert(mlir::isa<cir::ConstantOp>(defOp) &&
"Expected constant op");
// Materializes the values a rebuilt `cir.return` (emitted after the cleanup
// code) should return. Sinkable operands are moved into the destination
// block directly; all others are spilled: stored to a fresh entry-block
// alloca at the original exit point and reloaded in the destination block.
863 getReturnOpOperands(cir::ReturnOp returnOp, mlir::Operation *exitOp,
864 mlir::Location loc, mlir::PatternRewriter &rewriter,
865 llvm::SmallVectorImpl<mlir::Value> &returnValues)
const {
866 mlir::Block *destBlock = rewriter.getInsertionBlock();
867 auto funcOp = exitOp->getParentOfType<cir::FuncOp>();
868 assert(funcOp &&
"Return op has no function parent?");
869 mlir::Block &funcEntryBlock = funcOp.getBody().front();
871 for (mlir::Value operand : returnOp.getOperands()) {
872 if (shouldSinkReturnOperand(operand, returnOp)) {
// Cheap case: move the defining op after the cleanup.
874 mlir::Operation *defOp = operand.getDefiningOp();
875 defOp->moveBefore(destBlock, destBlock->end());
876 returnValues.push_back(operand);
// Spill case: alloca in the entry block sized/aligned for the operand.
879 cir::AllocaOp alloca;
881 mlir::OpBuilder::InsertionGuard guard(rewriter);
882 rewriter.setInsertionPointToStart(&funcEntryBlock);
883 cir::CIRDataLayout dataLayout(
884 funcOp->getParentOfType<mlir::ModuleOp>());
885 dataLayout.getAlignment(operand.getType(),
true).value();
887 cir::PointerType ptrType = cir::PointerType::get(operand.getType());
888 alloca = cir::AllocaOp::create(rewriter, loc, ptrType,
889 operand.getType(),
"__ret_operand_tmp",
890 rewriter.getI64IntegerAttr(alignment));
// Store at the original exit point (before the cleanup runs)...
895 mlir::OpBuilder::InsertionGuard guard(rewriter);
896 rewriter.setInsertionPoint(exitOp);
897 cir::StoreOp::create(rewriter, loc, operand, alloca,
900 cir::SyncScopeKindAttr(), cir::MemOrderAttr());
// ...and reload in the destination block after the cleanup.
904 rewriter.setInsertionPointToEnd(destBlock);
905 auto loaded = cir::LoadOp::create(
906 rewriter, loc, alloca,
false,
907 false, mlir::IntegerAttr(),
908 cir::SyncScopeKindAttr(), cir::MemOrderAttr());
909 returnValues.push_back(loaded);
// Re-emits, at the current insertion point (a post-cleanup destination
// block), the terminator corresponding to the original exit op: yield ->
// branch to continueBlock; break/continue/return are re-created; goto is not
// yet implemented. Returns success/failure accordingly.
919 createExitTerminator(mlir::Operation *exitOp, mlir::Location loc,
920 mlir::Block *continueBlock,
921 mlir::PatternRewriter &rewriter)
const {
922 return llvm::TypeSwitch<mlir::Operation *, mlir::LogicalResult>(exitOp)
923 .Case<cir::YieldOp>([&](
auto) {
925 cir::BrOp::create(rewriter, loc, continueBlock);
926 return mlir::success();
928 .Case<cir::BreakOp>([&](
auto) {
930 cir::BreakOp::create(rewriter, loc);
931 return mlir::success();
933 .Case<cir::ContinueOp>([&](
auto) {
935 cir::ContinueOp::create(rewriter, loc);
936 return mlir::success();
938 .Case<cir::ReturnOp>([&](
auto returnOp) {
// Return operands must be recomputed on this side of the cleanup.
942 if (returnOp.hasOperand()) {
943 llvm::SmallVector<mlir::Value, 2> returnValues;
944 getReturnOpOperands(returnOp, exitOp, loc, rewriter, returnValues);
945 cir::ReturnOp::create(rewriter, loc, returnValues);
947 cir::ReturnOp::create(rewriter, loc);
949 return mlir::success();
951 .Case<cir::GotoOp>([&](
auto gotoOp) {
// Keep the IR terminated even though we report an error.
958 cir::UnreachableOp::create(rewriter, loc);
959 return gotoOp.emitError(
960 "goto in cleanup scope is not yet implemented");
962 .
Default([&](mlir::Operation *op) {
963 cir::UnreachableOp::create(rewriter, loc);
964 return op->emitError(
965 "unexpected exit operation in cleanup scope body");
// Verifies the structural invariant that only a cleanup region's final block
// may leave the region: every non-final block must end in a terminator that
// stays inside the region (the Case lambdas return whether the terminator is
// acceptable in a non-final block). NOTE(review): "®ion" below is mojibake
// for "&region"; result-return lines appear elided by extraction.
971 static bool regionExitsOnlyFromLastBlock(mlir::Region &region) {
972 for (mlir::Block &block : region) {
// Only the last block may exit.
973 if (&block == &region.back())
975 bool expectedTerminator =
976 llvm::TypeSwitch<mlir::Operation *, bool>(block.getTerminator())
// These leave the region, so they are not allowed mid-region.
983 .Case<cir::YieldOp, cir::ReturnOp, cir::ResumeFlatOp,
984 cir::ContinueOp, cir::BreakOp, cir::GotoOp>(
985 [](
auto) {
return false; })
994 .Case<cir::TryCallOp>([](
auto) {
return false; })
998 .Case<cir::EhDispatchOp>([](
auto) {
return false; })
1002 .Case<cir::SwitchFlatOp>([](
auto) {
return false; })
// These terminate without leaving, so they are fine anywhere.
1005 .Case<cir::UnreachableOp, cir::TrapOp>([](
auto) {
return true; })
1007 .Case<cir::IndirectBrOp>([](
auto) {
return false; })
// Intra-region branches must target blocks of this same region.
1010 .Case<cir::BrOp>([&](cir::BrOp brOp) {
1011 assert(brOp.getDest()->getParent() == &region &&
1012 "branch destination is not in the region");
1015 .Case<cir::BrCondOp>([&](cir::BrCondOp brCondOp) {
1016 assert(brCondOp.getDestTrue()->getParent() == &region &&
1017 "branch destination is not in the region");
1018 assert(brCondOp.getDestFalse()->getParent() == &region &&
1019 "branch destination is not in the region");
1023 .
Default([](mlir::Operation *) ->
bool {
1024 llvm_unreachable(
"unexpected terminator in cleanup region");
1026 if (!expectedTerminator)
// Clones the cleanup region before `insertBefore` to build the EH cleanup
// path: the cloned entry gains an eh-token block argument, the clone is
// bracketed by cir.begin_cleanup / cir.end_cleanup, and the cloned yield is
// replaced by cir.resume forwarding the token. Presumably returns the cloned
// entry block (the return appears elided). A `loc` parameter is used below
// but its declaration line was lost to extraction.
1054 mlir::Block *buildEHCleanupBlocks(cir::CleanupScopeOp cleanupOp,
1056 mlir::Block *insertBefore,
1057 mlir::PatternRewriter &rewriter)
const {
1058 assert(regionExitsOnlyFromLastBlock(cleanupOp.getCleanupRegion()) &&
1059 "cleanup region has exits in non-final blocks");
// Remember the block preceding the clone so the cloned entry can be found
// afterwards (null when the clone lands at the region start).
1063 mlir::Block *blockBeforeClone =
insertBefore->getPrevNode();
1066 rewriter.cloneRegionBefore(cleanupOp.getCleanupRegion(), insertBefore);
1069 mlir::Block *clonedEntry = blockBeforeClone
1070 ? blockBeforeClone->getNextNode()
// The EH token flows into the cloned cleanup as a block argument.
1075 auto ehTokenType = cir::EhTokenType::get(rewriter.getContext());
1076 mlir::Value ehToken = clonedEntry->addArgument(ehTokenType, loc);
1078 rewriter.setInsertionPointToStart(clonedEntry);
1079 auto beginCleanup = cir::BeginCleanupOp::create(rewriter, loc, ehToken);
// The clone's last block should end in the cloned yield; replace it with
// end_cleanup + resume so unwinding continues past this scope.
1083 mlir::Block *lastClonedBlock =
insertBefore->getPrevNode();
1085 mlir::dyn_cast<cir::YieldOp>(lastClonedBlock->getTerminator());
1087 rewriter.setInsertionPoint(yieldOp);
1088 cir::EndCleanupOp::create(rewriter, loc, beginCleanup.getCleanupToken());
1089 rewriter.replaceOpWithNewOp<cir::ResumeOp>(yieldOp, ehToken);
1091 cleanupOp->emitError(
"Not yet implemented: cleanup region terminated "
1092 "with non-yield operation");
// Core rewrite for one cleanup scope. Inlines the body; routes every exit
// through the cleanup code (multi-exit scopes store an id in the cleanup
// dest slot and dispatch on it with a cir.switch.flat afterwards); for EH
// cleanups builds a cloned cleanup path plus an unwind block, rewrites
// throwing calls into try-calls, and chains nested resumes into the clone.
// NOTE(review): documented as-is on top of extraction damage — guards,
// braces and some returns are elided; verify against upstream.
1121 flattenCleanup(cir::CleanupScopeOp cleanupOp,
1122 llvm::SmallVectorImpl<CleanupExit> &exits,
1123 llvm::SmallVectorImpl<cir::CallOp> &callsToRewrite,
1124 llvm::SmallVectorImpl<cir::ResumeOp> &resumeOpsToChain,
1125 mlir::PatternRewriter &rewriter)
const {
1126 mlir::Location loc = cleanupOp.getLoc();
1127 cir::CleanupKind cleanupKind = cleanupOp.getCleanupKind();
// Which paths need the cleanup code: normal fallthrough, EH unwind, or both.
1128 bool hasNormalCleanup = cleanupKind == cir::CleanupKind::Normal ||
1129 cleanupKind == cir::CleanupKind::All;
1130 bool hasEHCleanup = cleanupKind == cir::CleanupKind::EH ||
1131 cleanupKind == cir::CleanupKind::All;
1132 bool isMultiExit = exits.size() > 1;
1135 mlir::Block *bodyEntry = &cleanupOp.getBodyRegion().front();
1136 mlir::Block *cleanupEntry = &cleanupOp.getCleanupRegion().front();
1137 mlir::Block *cleanupExit = &cleanupOp.getCleanupRegion().back();
1138 assert(regionExitsOnlyFromLastBlock(cleanupOp.getCleanupRegion()) &&
1139 "cleanup region has exits in non-final blocks");
1140 auto cleanupYield = dyn_cast<cir::YieldOp>(cleanupExit->getTerminator());
1141 if (!cleanupYield) {
1142 return rewriter.notifyMatchFailure(cleanupOp,
1143 "Not yet implemented: cleanup region "
1144 "terminated with non-yield operation");
// Multi-exit normal cleanups dispatch through the per-function dest slot.
1151 cir::AllocaOp destSlot;
1152 if (isMultiExit && hasNormalCleanup) {
1153 auto funcOp = cleanupOp->getParentOfType<cir::FuncOp>();
1155 return cleanupOp->emitError(
"cleanup scope not inside a function");
1156 destSlot = getOrCreateCleanupDestSlot(funcOp, rewriter, loc);
// Everything after the scope moves to the continuation block.
1160 mlir::Block *currentBlock = rewriter.getInsertionBlock();
1161 mlir::Block *continueBlock =
1162 rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
// EH path: clone the cleanup and, if there are throwing calls, build the
// unwind landing block feeding it. (The enclosing hasEHCleanup condition is
// partially elided.)
1172 mlir::Block *unwindBlock =
nullptr;
1173 mlir::Block *ehCleanupEntry =
nullptr;
1175 (!callsToRewrite.empty() || !resumeOpsToChain.empty())) {
1177 buildEHCleanupBlocks(cleanupOp, loc, continueBlock, rewriter);
1181 if (!callsToRewrite.empty())
1182 unwindBlock = buildUnwindBlock(ehCleanupEntry,
true, loc,
1183 ehCleanupEntry, rewriter);
// Normal-path blocks are inlined before the EH machinery (or directly
// before the continuation when there is none).
1190 mlir::Block *normalInsertPt =
1191 unwindBlock ? unwindBlock
1192 : (ehCleanupEntry ? ehCleanupEntry : continueBlock);
1195 rewriter.inlineRegionBefore(cleanupOp.getBodyRegion(), normalInsertPt);
1198 if (hasNormalCleanup)
1199 rewriter.inlineRegionBefore(cleanupOp.getCleanupRegion(), normalInsertPt);
// Enter the body from the original block.
1202 rewriter.setInsertionPointToEnd(currentBlock);
1203 cir::BrOp::create(rewriter, loc, bodyEntry);
1206 mlir::LogicalResult result = mlir::success();
1207 if (hasNormalCleanup) {
// The cleanup's yield now branches to a fresh exit block.
1209 mlir::Block *exitBlock = rewriter.createBlock(normalInsertPt);
1212 rewriter.setInsertionPoint(cleanupYield);
1213 rewriter.replaceOpWithNewOp<cir::BrOp>(cleanupYield, exitBlock);
1217 rewriter.setInsertionPointToEnd(exitBlock);
// Multi-exit: reload the dest slot and dispatch to per-exit blocks.
1220 auto slotValue = cir::LoadOp::create(
1221 rewriter, loc, destSlot,
false,
1222 false, mlir::IntegerAttr(),
1223 cir::SyncScopeKindAttr(), cir::MemOrderAttr());
1226 llvm::SmallVector<mlir::APInt, 8> caseValues;
1227 llvm::SmallVector<mlir::Block *, 8> caseDestinations;
1228 llvm::SmallVector<mlir::ValueRange, 8> caseOperands;
1229 cir::IntType s32Type =
1230 cir::IntType::get(rewriter.getContext(), 32,
true);
1232 for (
const CleanupExit &exit : exits) {
// One destination block per exit, ending in the re-created terminator.
1234 mlir::Block *destBlock = rewriter.createBlock(normalInsertPt);
1235 rewriter.setInsertionPointToEnd(destBlock);
1237 createExitTerminator(exit.exitOp, loc, continueBlock, rewriter);
1240 caseValues.push_back(
1241 llvm::APInt(32,
static_cast<uint64_t>(exit.destinationId),
true));
1242 caseDestinations.push_back(destBlock);
1243 caseOperands.push_back(mlir::ValueRange());
// At the original exit: store the id, then branch into the cleanup.
1247 rewriter.setInsertionPoint(exit.exitOp);
1248 auto destIdConst = cir::ConstantOp::create(
1249 rewriter, loc, cir::IntAttr::get(s32Type, exit.destinationId));
1250 cir::StoreOp::create(rewriter, loc, destIdConst, destSlot,
1252 mlir::IntegerAttr(),
1253 cir::SyncScopeKindAttr(), cir::MemOrderAttr());
1254 rewriter.replaceOpWithNewOp<cir::BrOp>(exit.exitOp, cleanupEntry);
1262 if (result.failed())
// Unreachable default keeps the switch.flat well-formed.
1267 mlir::Block *defaultBlock = rewriter.createBlock(normalInsertPt);
1268 rewriter.setInsertionPointToEnd(defaultBlock);
1269 cir::UnreachableOp::create(rewriter, loc);
1272 rewriter.setInsertionPointToEnd(exitBlock);
1273 cir::SwitchFlatOp::create(rewriter, loc, slotValue, defaultBlock,
1274 mlir::ValueRange(), caseValues,
1275 caseDestinations, caseOperands);
// Single-exit: no dispatch, re-create the lone terminator directly.
1279 rewriter.setInsertionPointToEnd(exitBlock);
1280 mlir::Operation *exitOp = exits[0].exitOp;
1281 result = createExitTerminator(exitOp, loc, continueBlock, rewriter);
1284 rewriter.setInsertionPoint(exitOp);
1285 rewriter.replaceOpWithNewOp<cir::BrOp>(exitOp, cleanupEntry);
// EH-only cleanup: body yields bypass the cleanup code entirely.
1290 for (CleanupExit &exit : exits) {
1291 if (isa<cir::YieldOp>(exit.exitOp)) {
1292 rewriter.setInsertionPoint(exit.exitOp);
1293 rewriter.replaceOpWithNewOp<cir::BrOp>(exit.exitOp, continueBlock);
// Throwing calls unwind into this scope's unwind block.
1303 for (cir::CallOp callOp : callsToRewrite)
1304 replaceCallWithTryCall(callOp, unwindBlock, loc, rewriter);
// Resumes from nested scopes chain into this scope's cloned cleanup.
1312 if (ehCleanupEntry) {
1313 for (cir::ResumeOp resumeOp : resumeOpsToChain) {
1314 mlir::Value ehToken = resumeOp.getEhToken();
1315 rewriter.setInsertionPoint(resumeOp);
1316 rewriter.replaceOpWithNewOp<cir::BrOp>(
1317 resumeOp, mlir::ValueRange{ehToken}, ehCleanupEntry);
1322 rewriter.eraseOp(cleanupOp);
// Pattern entry point: flattens innermost cleanup scopes first (bails while
// nested cleanup/try ops remain), rejects not-yet-supported throwing calls
// inside the cleanup region itself, then gathers exits / throwing calls /
// resumes and delegates to flattenCleanup.
1328 matchAndRewrite(cir::CleanupScopeOp cleanupOp,
1329 mlir::PatternRewriter &rewriter)
const override {
1330 mlir::OpBuilder::InsertionGuard guard(rewriter);
// Innermost-first ordering: defer while nested scopes/tries exist.
1335 bool hasNestedOps = cleanupOp.getBodyRegion()
1336 .walk([&](mlir::Operation *op) {
1337 if (isa<cir::CleanupScopeOp, cir::TryOp>(op))
1338 return mlir::WalkResult::interrupt();
1339 return mlir::WalkResult::advance();
1343 return mlir::failure();
1345 cir::CleanupKind cleanupKind = cleanupOp.getCleanupKind();
// EH-capable cleanups cannot themselves contain throwing calls yet.
1350 if (cleanupKind != cir::CleanupKind::Normal) {
1351 llvm::SmallVector<cir::CallOp> cleanupThrowingCalls;
1352 collectThrowingCalls(cleanupOp.getCleanupRegion(), cleanupThrowingCalls);
1353 if (!cleanupThrowingCalls.empty())
1354 return cleanupOp->emitError(
1355 "throwing calls in cleanup region are not yet implemented");
// Gather everything flattenCleanup needs.
1359 llvm::SmallVector<CleanupExit> exits;
1361 collectExits(cleanupOp.getBodyRegion(), exits, nextId);
1363 assert(!exits.empty() &&
"cleanup scope body has no exit");
1368 llvm::SmallVector<cir::CallOp> callsToRewrite;
1369 if (cleanupKind != cir::CleanupKind::Normal)
1370 collectThrowingCalls(cleanupOp.getBodyRegion(), callsToRewrite);
1374 llvm::SmallVector<cir::ResumeOp> resumeOpsToChain;
1375 if (cleanupKind != cir::CleanupKind::Normal)
1376 collectResumeOps(cleanupOp.getBodyRegion(), resumeOpsToChain);
1378 return flattenCleanup(cleanupOp, exits, callsToRewrite, resumeOpsToChain,
// Pattern that flattens cir.try into explicit blocks: inlined body, catch
// handlers, a dispatch block, and (when needed) an unwind block.
// NOTE(review): elided listing — original line numbers are fused into the
// text and some statements/braces between numbered lines are missing.
1383class CIRTryOpFlattening :
public mlir::OpRewritePattern<cir::TryOp> {
1385 using OpRewritePattern<cir::TryOp>::OpRewritePattern;
// Build the dispatch block that receives the EH token and routes it to the
// matching handler via cir.eh_dispatch. Exactly one catch_all or unwind
// handler becomes the default destination; all other handler types become
// typed case destinations. Returns the new block.
1390 mlir::Block *buildCatchDispatchBlock(
1391 cir::TryOp tryOp, mlir::ArrayAttr handlerTypes,
1392 llvm::SmallVectorImpl<mlir::Block *> &catchHandlerBlocks,
1393 mlir::Location loc, mlir::Block *insertBefore,
1394 mlir::PatternRewriter &rewriter)
const {
// The dispatch block carries the in-flight exception as a block argument
// of !cir.eh_token type.
1395 mlir::Block *dispatchBlock = rewriter.createBlock(insertBefore);
1396 auto ehTokenType = cir::EhTokenType::get(rewriter.getContext());
1397 mlir::Value ehToken = dispatchBlock->addArgument(ehTokenType, loc);
1399 rewriter.setInsertionPointToEnd(dispatchBlock);
// Partition handlers: catch_all / unwind become the (unique) default
// destination; everything else is a typed catch case.
1402 llvm::SmallVector<mlir::Attribute> catchTypeAttrs;
1403 llvm::SmallVector<mlir::Block *> catchDests;
1404 mlir::Block *defaultDest =
nullptr;
1405 bool defaultIsCatchAll =
false;
1407 for (
auto [typeAttr, handlerBlock] :
1408 llvm::zip(handlerTypes, catchHandlerBlocks)) {
1409 if (mlir::isa<cir::CatchAllAttr>(typeAttr)) {
1410 assert(!defaultDest &&
"multiple catch_all or unwind handlers");
1411 defaultDest = handlerBlock;
1412 defaultIsCatchAll =
true;
1413 }
else if (mlir::isa<cir::UnwindAttr>(typeAttr)) {
1414 assert(!defaultDest &&
"multiple catch_all or unwind handlers");
1415 defaultDest = handlerBlock;
1416 defaultIsCatchAll =
false;
// (else-branch lines elided in excerpt) typed handlers accumulate here.
1419 catchTypeAttrs.push_back(typeAttr);
1420 catchDests.push_back(handlerBlock);
1424 assert(defaultDest &&
"dispatch must have a catch_all or unwind handler");
// Leave the array attr null when there are no typed cases.
1426 mlir::ArrayAttr catchTypesArrayAttr;
1427 if (!catchTypeAttrs.empty())
1428 catchTypesArrayAttr = rewriter.getArrayAttr(catchTypeAttrs);
1430 cir::EhDispatchOp::create(rewriter, loc, ehToken, catchTypesArrayAttr,
1431 defaultIsCatchAll, defaultDest, catchDests);
1433 return dispatchBlock;
// Inline one catch-handler region before `insertBefore` and rewrite its
// terminating cir.yield ops into branches to `continueBlock`. Returns the
// handler's entry block (used later as a dispatch destination).
// NOTE(review): elided listing — the assert's verification lambda below is
// missing several lines (loop headers/braces) in this excerpt.
1450 mlir::Block *flattenCatchHandler(mlir::Region &handlerRegion,
1451 mlir::Block *continueBlock,
1453 mlir::Block *insertBefore,
1454 mlir::PatternRewriter &rewriter)
const {
1456 mlir::Block *handlerEntry = &handlerRegion.front();
1459 rewriter.inlineRegionBefore(handlerRegion, insertBefore);
// Scan the inlined blocks for yields; each yield must be preceded
// (possibly across branch-only predecessor blocks) by cir.end_catch.
1462 for (mlir::Block &block : llvm::make_range(handlerEntry->getIterator(),
1464 if (
auto yieldOp = dyn_cast<cir::YieldOp>(block.getTerminator())) {
// Fast path: end_catch directly precedes the yield.
1473 if (mlir::Operation *prev = yieldOp->getPrevNode())
1474 return isa<cir::EndCatchOp>(prev);
// Otherwise walk back through single-predecessor, branch-only blocks
// looking for the end_catch.
1477 mlir::Block *
b = block.getSinglePredecessor();
1479 mlir::Operation *term =
b->getTerminator();
1480 if (mlir::Operation *prev = term->getPrevNode())
1481 return isa<cir::EndCatchOp>(prev);
1482 if (!isa<cir::BrOp>(term))
1484 b =
b->getSinglePredecessor();
1487 }() &&
"expected end_catch as last operation before yield "
1488 "in catch handler, with only branches in between");
// Replace the structured yield with an explicit branch to the join block.
1489 rewriter.setInsertionPoint(yieldOp);
1490 rewriter.replaceOpWithNewOp<cir::BrOp>(yieldOp, continueBlock);
1494 return handlerEntry;
// Inline the unwind-handler region before `insertBefore`. Unlike catch
// handlers, no yield rewriting is shown here (trailing lines, including the
// return of the entry block, are elided in this excerpt).
1503 mlir::Block *flattenUnwindHandler(mlir::Region &unwindRegion,
1505 mlir::Block *insertBefore,
1506 mlir::PatternRewriter &rewriter)
const {
1507 mlir::Block *unwindEntry = &unwindRegion.front();
1508 rewriter.inlineRegionBefore(unwindRegion, insertBefore);
// Flatten one cir.try: inline the try region into the surrounding CFG,
// flatten its handlers, build the EH dispatch/unwind machinery, and erase
// the structured op.
// NOTE(review): elided listing — original line numbers are fused into the
// text and some statements between numbered lines are missing.
1513 matchAndRewrite(cir::TryOp tryOp,
1514 mlir::PatternRewriter &rewriter)
const override {
// Innermost-first: if another cleanup scope or try is nested inside this
// one, bail out (guard/receiver lines elided in excerpt).
1520 ->walk([&](mlir::Operation *op) {
1521 if (isa<cir::CleanupScopeOp, cir::TryOp>(op) && op != tryOp)
1522 return mlir::WalkResult::interrupt();
1523 return mlir::WalkResult::advance();
1527 return mlir::failure();
1529 mlir::OpBuilder::InsertionGuard guard(rewriter);
1530 mlir::Location loc = tryOp.getLoc();
1532 mlir::ArrayAttr handlerTypes = tryOp.getHandlerTypesAttr();
1533 mlir::MutableArrayRef<mlir::Region> handlerRegions =
1534 tryOp.getHandlerRegions();
// Calls in the try body that can throw must be turned into try_call with
// an explicit unwind destination.
1537 llvm::SmallVector<cir::CallOp> callsToRewrite;
1538 collectThrowingCalls(tryOp.getTryRegion(), callsToRewrite);
// Resume ops in the body will be re-pointed at the dispatch block.
1541 llvm::SmallVector<cir::ResumeOp> resumeOpsToChain;
1542 collectResumeOps(tryOp.getTryRegion(), resumeOpsToChain);
// Split the current block at the try op; everything after it becomes the
// join ("continue") block.
1545 mlir::Block *currentBlock = rewriter.getInsertionBlock();
1546 mlir::Block *continueBlock =
1547 rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
// Remember body entry/exit before inlining invalidates the region.
1550 mlir::Block *bodyEntry = &tryOp.getTryRegion().front();
1551 mlir::Block *bodyExit = &tryOp.getTryRegion().back();
1554 rewriter.inlineRegionBefore(tryOp.getTryRegion(), continueBlock);
// Branch from the original block into the inlined body.
1557 rewriter.setInsertionPointToEnd(currentBlock);
1558 cir::BrOp::create(rewriter, loc, bodyEntry);
// A structured yield at the body's end becomes a branch to the join block.
1561 if (
auto bodyYield = dyn_cast<cir::YieldOp>(bodyExit->getTerminator())) {
1562 rewriter.setInsertionPoint(bodyYield);
1563 rewriter.replaceOpWithNewOp<cir::BrOp>(bodyYield, continueBlock);
// Fast path 1: no handlers at all — nothing EH-related to build.
1567 if (!handlerTypes || handlerTypes.empty()) {
1568 rewriter.eraseOp(tryOp);
1569 return mlir::success();
// Fast path 2: handlers exist but nothing in the body can reach them.
1576 if (callsToRewrite.empty() && resumeOpsToChain.empty()) {
1577 rewriter.eraseOp(tryOp);
1578 return mlir::success();
// Flatten each handler region, keeping entry blocks in handler order so
// they can be zipped with handlerTypes by the dispatch builder.
1584 llvm::SmallVector<mlir::Block *> catchHandlerBlocks;
1586 for (
const auto &[idx, typeAttr] : llvm::enumerate(handlerTypes)) {
1587 mlir::Region &handlerRegion = handlerRegions[idx];
1589 if (mlir::isa<cir::UnwindAttr>(typeAttr)) {
1590 mlir::Block *unwindEntry =
1591 flattenUnwindHandler(handlerRegion, loc, continueBlock, rewriter);
1592 catchHandlerBlocks.push_back(unwindEntry);
// (else-branch header elided in excerpt)
1594 mlir::Block *handlerEntry = flattenCatchHandler(
1595 handlerRegion, continueBlock, loc, continueBlock, rewriter);
1596 catchHandlerBlocks.push_back(handlerEntry);
// Dispatch block routes the EH token to the right handler; placed before
// the first handler block.
1601 mlir::Block *dispatchBlock =
1602 buildCatchDispatchBlock(tryOp, handlerTypes, catchHandlerBlocks, loc,
1603 catchHandlerBlocks.front(), rewriter);
// Throwing calls need an unwind landing block that feeds the dispatcher.
1611 bool hasCleanup = tryOp.getCleanup();
1612 if (!callsToRewrite.empty()) {
1614 mlir::Block *unwindBlock = buildUnwindBlock(dispatchBlock, hasCleanup,
1615 loc, dispatchBlock, rewriter);
1617 for (cir::CallOp callOp : callsToRewrite)
1618 replaceCallWithTryCall(callOp, unwindBlock, loc, rewriter);
// Resume ops re-enter dispatch directly, forwarding their EH token as the
// dispatch block's argument.
1624 for (cir::ResumeOp resumeOp : resumeOpsToChain) {
1625 mlir::Value ehToken = resumeOp.getEhToken();
1626 rewriter.setInsertionPoint(resumeOp);
1627 rewriter.replaceOpWithNewOp<cir::BrOp>(
1628 resumeOp, mlir::ValueRange{ehToken}, dispatchBlock);
// The structured op is now fully lowered into explicit CFG.
1632 rewriter.eraseOp(tryOp);
1634 return mlir::success();
// Register every flattening pattern of this pass into `patterns`.
// NOTE(review): the receiver of `.add<...>` (the `patterns` line) is elided
// in this excerpt.
1638void populateFlattenCFGPatterns(RewritePatternSet &patterns) {
1640 .add<CIRIfFlattening, CIRLoopOpInterfaceFlattening, CIRScopeOpFlattening,
1641 CIRSwitchOpFlattening, CIRTernaryOpFlattening,
1642 CIRCleanupScopeOpFlattening, CIRTryOpFlattening>(
1643 patterns.getContext());
// Pass entry point: collect the structured control-flow ops post-order
// (innermost first) and greedily apply the flattening patterns to them.
// NOTE(review): elided listing — the walk body's collection statements are
// missing between the numbered lines.
1646void CIRFlattenCFGPass::runOnOperation() {
1647 RewritePatternSet patterns(&getContext());
1648 populateFlattenCFGPatterns(patterns);
1651 llvm::SmallVector<Operation *, 16> ops;
1652 getOperation()->walk<mlir::WalkOrder::PostOrder>([&](Operation *op) {
1653 if (isa<IfOp, ScopeOp, SwitchOp, LoopOpInterface, TernaryOp, CleanupScopeOp,
// Apply patterns only to the collected ops; fail the pass on driver error.
1659 if (applyOpPatternsGreedily(ops, std::move(patterns)).failed())
1660 signalPassFailure();
1668 return std::make_unique<CIRFlattenCFGPass>();
std::unique_ptr< Pass > createCIRFlattenCFGPass()
int const char * function
float __ovld __cnfn step(float, float)
Returns 0.0 if x < edge, otherwise it returns 1.0.
static bool stackSaveOp()