15#include "mlir/Dialect/Func/IR/FuncOps.h"
16#include "mlir/IR/Block.h"
17#include "mlir/IR/Builders.h"
18#include "mlir/IR/PatternMatch.h"
19#include "mlir/Support/LogicalResult.h"
20#include "mlir/Transforms/DialectConversion.h"
21#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
26#include "llvm/ADT/TypeSwitch.h"
32#define GEN_PASS_DEF_CIRFLATTENCFG
33#include "clang/CIR/Dialect/Passes.h.inc"
39void lowerTerminator(mlir::Operation *op, mlir::Block *dest,
40 mlir::PatternRewriter &rewriter) {
41 assert(op->hasTrait<mlir::OpTrait::IsTerminator>() &&
"not a terminator");
42 mlir::OpBuilder::InsertionGuard guard(rewriter);
43 rewriter.setInsertionPoint(op);
44 rewriter.replaceOpWithNewOp<cir::BrOp>(op, dest);
49template <
typename... Ops>
50void walkRegionSkipping(
52 mlir::function_ref<mlir::WalkResult(mlir::Operation *)> callback) {
53 region.walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *op) {
55 return mlir::WalkResult::skip();
60struct CIRFlattenCFGPass :
public impl::CIRFlattenCFGBase<CIRFlattenCFGPass> {
62 CIRFlattenCFGPass() =
default;
63 void runOnOperation()
override;
66struct CIRIfFlattening :
public mlir::OpRewritePattern<cir::IfOp> {
67 using OpRewritePattern<IfOp>::OpRewritePattern;
70 matchAndRewrite(cir::IfOp ifOp,
71 mlir::PatternRewriter &rewriter)
const override {
72 mlir::OpBuilder::InsertionGuard guard(rewriter);
73 mlir::Location loc = ifOp.getLoc();
74 bool emptyElse = ifOp.getElseRegion().empty();
75 mlir::Block *currentBlock = rewriter.getInsertionBlock();
76 mlir::Block *remainingOpsBlock =
77 rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
78 mlir::Block *continueBlock;
79 if (ifOp->getResults().empty())
80 continueBlock = remainingOpsBlock;
82 llvm_unreachable(
"NYI");
85 mlir::Block *thenBeforeBody = &ifOp.getThenRegion().front();
86 mlir::Block *thenAfterBody = &ifOp.getThenRegion().back();
87 rewriter.inlineRegionBefore(ifOp.getThenRegion(), continueBlock);
89 rewriter.setInsertionPointToEnd(thenAfterBody);
90 if (
auto thenYieldOp =
91 dyn_cast<cir::YieldOp>(thenAfterBody->getTerminator())) {
92 rewriter.replaceOpWithNewOp<cir::BrOp>(thenYieldOp, thenYieldOp.getArgs(),
96 rewriter.setInsertionPointToEnd(continueBlock);
99 mlir::Block *elseBeforeBody =
nullptr;
100 mlir::Block *elseAfterBody =
nullptr;
102 elseBeforeBody = &ifOp.getElseRegion().front();
103 elseAfterBody = &ifOp.getElseRegion().back();
104 rewriter.inlineRegionBefore(ifOp.getElseRegion(), continueBlock);
106 elseBeforeBody = elseAfterBody = continueBlock;
109 rewriter.setInsertionPointToEnd(currentBlock);
110 cir::BrCondOp::create(rewriter, loc, ifOp.getCondition(), thenBeforeBody,
114 rewriter.setInsertionPointToEnd(elseAfterBody);
115 if (
auto elseYieldOP =
116 dyn_cast<cir::YieldOp>(elseAfterBody->getTerminator())) {
117 rewriter.replaceOpWithNewOp<cir::BrOp>(
118 elseYieldOP, elseYieldOP.getArgs(), continueBlock);
122 rewriter.replaceOp(ifOp, continueBlock->getArguments());
123 return mlir::success();
127class CIRScopeOpFlattening :
public mlir::OpRewritePattern<cir::ScopeOp> {
129 using OpRewritePattern<cir::ScopeOp>::OpRewritePattern;
132 matchAndRewrite(cir::ScopeOp scopeOp,
133 mlir::PatternRewriter &rewriter)
const override {
134 mlir::OpBuilder::InsertionGuard guard(rewriter);
135 mlir::Location loc = scopeOp.getLoc();
143 if (scopeOp.isEmpty()) {
144 rewriter.eraseOp(scopeOp);
145 return mlir::success();
150 mlir::Block *currentBlock = rewriter.getInsertionBlock();
151 mlir::Block *continueBlock =
152 rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
153 if (scopeOp.getNumResults() > 0)
154 continueBlock->addArguments(scopeOp.getResultTypes(), loc);
157 mlir::Block *beforeBody = &scopeOp.getScopeRegion().front();
158 mlir::Block *afterBody = &scopeOp.getScopeRegion().back();
159 rewriter.inlineRegionBefore(scopeOp.getScopeRegion(), continueBlock);
162 rewriter.setInsertionPointToEnd(currentBlock);
164 cir::BrOp::create(rewriter, loc, mlir::ValueRange(), beforeBody);
168 rewriter.setInsertionPointToEnd(afterBody);
169 if (
auto yieldOp = dyn_cast<cir::YieldOp>(afterBody->getTerminator())) {
170 rewriter.replaceOpWithNewOp<cir::BrOp>(yieldOp, yieldOp.getArgs(),
175 rewriter.replaceOp(scopeOp, continueBlock->getArguments());
177 return mlir::success();
181class CIRSwitchOpFlattening :
public mlir::OpRewritePattern<cir::SwitchOp> {
183 using OpRewritePattern<cir::SwitchOp>::OpRewritePattern;
185 inline void rewriteYieldOp(mlir::PatternRewriter &rewriter,
186 cir::YieldOp yieldOp,
187 mlir::Block *destination)
const {
188 rewriter.setInsertionPoint(yieldOp);
189 rewriter.replaceOpWithNewOp<cir::BrOp>(yieldOp, yieldOp.getOperands(),
194 Block *condBrToRangeDestination(cir::SwitchOp op,
195 mlir::PatternRewriter &rewriter,
196 mlir::Block *rangeDestination,
197 mlir::Block *defaultDestination,
198 const APInt &lowerBound,
199 const APInt &upperBound)
const {
200 assert(lowerBound.sle(upperBound) &&
"Invalid range");
201 mlir::Block *resBlock = rewriter.createBlock(defaultDestination);
202 cir::IntType sIntType = cir::IntType::get(op.getContext(), 32,
true);
203 cir::IntType uIntType = cir::IntType::get(op.getContext(), 32,
false);
205 cir::ConstantOp rangeLength = cir::ConstantOp::create(
206 rewriter, op.getLoc(),
207 cir::IntAttr::get(sIntType, upperBound - lowerBound));
209 cir::ConstantOp lowerBoundValue = cir::ConstantOp::create(
210 rewriter, op.getLoc(), cir::IntAttr::get(sIntType, lowerBound));
211 mlir::Value diffValue = cir::SubOp::create(
212 rewriter, op.getLoc(), op.getCondition(), lowerBoundValue);
215 cir::CastOp uDiffValue = cir::CastOp::create(
216 rewriter, op.getLoc(), uIntType, CastKind::integral, diffValue);
217 cir::CastOp uRangeLength = cir::CastOp::create(
218 rewriter, op.getLoc(), uIntType, CastKind::integral, rangeLength);
220 cir::CmpOp cmpResult = cir::CmpOp::create(
221 rewriter, op.getLoc(), cir::CmpOpKind::le, uDiffValue, uRangeLength);
222 cir::BrCondOp::create(rewriter, op.getLoc(), cmpResult, rangeDestination,
228 matchAndRewrite(cir::SwitchOp op,
229 mlir::PatternRewriter &rewriter)
const override {
233 bool hasNestedCleanup = op->walk([&](cir::CleanupScopeOp) {
234 return mlir::WalkResult::interrupt();
236 if (hasNestedCleanup)
237 return mlir::failure();
239 llvm::SmallVector<CaseOp> cases;
240 op.collectCases(cases);
244 rewriter.eraseOp(op);
245 return mlir::success();
249 mlir::Block *exitBlock = rewriter.splitBlock(
250 rewriter.getBlock(), op->getNextNode()->getIterator());
263 cir::YieldOp switchYield =
nullptr;
265 for (mlir::Block &block :
266 llvm::make_early_inc_range(op.getBody().getBlocks()))
267 if (
auto yieldOp = dyn_cast<cir::YieldOp>(block.getTerminator()))
268 switchYield = yieldOp;
270 assert(!op.getBody().empty());
271 mlir::Block *originalBlock = op->getBlock();
272 mlir::Block *swopBlock =
273 rewriter.splitBlock(originalBlock, op->getIterator());
274 rewriter.inlineRegionBefore(op.getBody(), exitBlock);
277 rewriteYieldOp(rewriter, switchYield, exitBlock);
279 rewriter.setInsertionPointToEnd(originalBlock);
280 cir::BrOp::create(rewriter, op.getLoc(), swopBlock);
285 llvm::SmallVector<mlir::APInt, 8> caseValues;
286 llvm::SmallVector<mlir::Block *, 8> caseDestinations;
287 llvm::SmallVector<mlir::ValueRange, 8> caseOperands;
289 llvm::SmallVector<std::pair<APInt, APInt>> rangeValues;
290 llvm::SmallVector<mlir::Block *> rangeDestinations;
291 llvm::SmallVector<mlir::ValueRange> rangeOperands;
294 mlir::Block *defaultDestination = exitBlock;
295 mlir::ValueRange defaultOperands = exitBlock->getArguments();
298 for (cir::CaseOp caseOp : cases) {
299 mlir::Region ®ion = caseOp.getCaseRegion();
302 switch (caseOp.getKind()) {
303 case cir::CaseOpKind::Default:
304 defaultDestination = ®ion.front();
305 defaultOperands = defaultDestination->getArguments();
307 case cir::CaseOpKind::Range:
308 assert(caseOp.getValue().size() == 2 &&
309 "Case range should have 2 case value");
310 rangeValues.push_back(
311 {cast<cir::IntAttr>(caseOp.getValue()[0]).getValue(),
312 cast<cir::IntAttr>(caseOp.getValue()[1]).getValue()});
313 rangeDestinations.push_back(®ion.front());
314 rangeOperands.push_back(rangeDestinations.back()->getArguments());
316 case cir::CaseOpKind::Anyof:
317 case cir::CaseOpKind::Equal:
319 for (
const mlir::Attribute &value : caseOp.getValue()) {
320 caseValues.push_back(cast<cir::IntAttr>(value).getValue());
321 caseDestinations.push_back(®ion.front());
322 caseOperands.push_back(caseDestinations.back()->getArguments());
328 walkRegionSkipping<cir::LoopOpInterface, cir::SwitchOp>(
329 region, [&](mlir::Operation *op) {
330 if (!isa<cir::BreakOp>(op))
331 return mlir::WalkResult::advance();
333 lowerTerminator(op, exitBlock, rewriter);
334 return mlir::WalkResult::skip();
338 for (mlir::Block &blk : region.getBlocks()) {
339 if (blk.getNumSuccessors())
342 if (
auto yieldOp = dyn_cast<cir::YieldOp>(blk.getTerminator())) {
343 mlir::Operation *nextOp = caseOp->getNextNode();
344 assert(nextOp &&
"caseOp is not expected to be the last op");
345 mlir::Block *oldBlock = nextOp->getBlock();
346 mlir::Block *newBlock =
347 rewriter.splitBlock(oldBlock, nextOp->getIterator());
348 rewriter.setInsertionPointToEnd(oldBlock);
349 cir::BrOp::create(rewriter, nextOp->getLoc(), mlir::ValueRange(),
351 rewriteYieldOp(rewriter, yieldOp, newBlock);
355 mlir::Block *oldBlock = caseOp->getBlock();
356 mlir::Block *newBlock =
357 rewriter.splitBlock(oldBlock, caseOp->getIterator());
359 mlir::Block &entryBlock = caseOp.getCaseRegion().front();
360 rewriter.inlineRegionBefore(caseOp.getCaseRegion(), newBlock);
363 rewriter.setInsertionPointToEnd(oldBlock);
364 cir::BrOp::create(rewriter, caseOp.getLoc(), &entryBlock);
368 for (cir::CaseOp caseOp : cases) {
369 mlir::Block *caseBlock = caseOp->getBlock();
372 if (caseBlock->hasNoPredecessors())
373 rewriter.eraseBlock(caseBlock);
375 rewriter.eraseOp(caseOp);
378 for (
auto [rangeVal, operand, destination] :
379 llvm::zip(rangeValues, rangeOperands, rangeDestinations)) {
380 APInt lowerBound = rangeVal.first;
381 APInt upperBound = rangeVal.second;
384 if (lowerBound.sgt(upperBound))
389 constexpr int kSmallRangeThreshold = 64;
390 if ((upperBound - lowerBound)
391 .ult(llvm::APInt(32, kSmallRangeThreshold))) {
392 for (APInt iValue = lowerBound; iValue.sle(upperBound); ++iValue) {
393 caseValues.push_back(iValue);
394 caseOperands.push_back(operand);
395 caseDestinations.push_back(destination);
401 condBrToRangeDestination(op, rewriter, destination,
402 defaultDestination, lowerBound, upperBound);
403 defaultOperands = operand;
407 rewriter.setInsertionPoint(op);
408 rewriter.replaceOpWithNewOp<cir::SwitchFlatOp>(
409 op, op.getCondition(), defaultDestination, defaultOperands, caseValues,
410 caseDestinations, caseOperands);
412 return mlir::success();
416class CIRLoopOpInterfaceFlattening
417 :
public mlir::OpInterfaceRewritePattern<cir::LoopOpInterface> {
419 using mlir::OpInterfaceRewritePattern<
420 cir::LoopOpInterface>::OpInterfaceRewritePattern;
422 inline void lowerConditionOp(cir::ConditionOp op, mlir::Block *body,
424 mlir::PatternRewriter &rewriter)
const {
425 mlir::OpBuilder::InsertionGuard guard(rewriter);
426 rewriter.setInsertionPoint(op);
427 rewriter.replaceOpWithNewOp<cir::BrCondOp>(op, op.getCondition(), body,
432 matchAndRewrite(cir::LoopOpInterface op,
433 mlir::PatternRewriter &rewriter)
const final {
437 bool hasNestedCleanup = op->walk([&](cir::CleanupScopeOp) {
438 return mlir::WalkResult::interrupt();
440 if (hasNestedCleanup)
441 return mlir::failure();
444 mlir::Block *entry = rewriter.getInsertionBlock();
446 rewriter.splitBlock(entry, rewriter.getInsertionPoint());
447 mlir::Block *cond = &op.getCond().front();
448 mlir::Block *body = &op.getBody().front();
450 (op.maybeGetStep() ? &op.maybeGetStep()->front() :
nullptr);
453 rewriter.setInsertionPointToEnd(entry);
454 cir::BrOp::create(rewriter, op.getLoc(), &op.getEntry().front());
461 cast<cir::ConditionOp>(op.getCond().back().getTerminator());
462 lowerConditionOp(conditionOp, body, exit, rewriter);
469 mlir::Block *dest = (
step ?
step : cond);
470 op.walkBodySkippingNestedLoops([&](mlir::Operation *op) {
471 if (!isa<cir::ContinueOp>(op))
472 return mlir::WalkResult::advance();
474 lowerTerminator(op, dest, rewriter);
475 return mlir::WalkResult::skip();
479 walkRegionSkipping<cir::LoopOpInterface, cir::SwitchOp>(
480 op.getBody(), [&](mlir::Operation *op) {
481 if (!isa<cir::BreakOp>(op))
482 return mlir::WalkResult::advance();
484 lowerTerminator(op, exit, rewriter);
485 return mlir::WalkResult::skip();
489 for (mlir::Block &blk : op.getBody().getBlocks()) {
490 auto bodyYield = dyn_cast<cir::YieldOp>(blk.getTerminator());
492 lowerTerminator(bodyYield, (
step ?
step : cond), rewriter);
500 cast<cir::YieldOp>(op.maybeGetStep()->back().getTerminator()), cond,
504 rewriter.inlineRegionBefore(op.getCond(), exit);
505 rewriter.inlineRegionBefore(op.getBody(), exit);
507 rewriter.inlineRegionBefore(*op.maybeGetStep(), exit);
509 rewriter.eraseOp(op);
510 return mlir::success();
514class CIRTernaryOpFlattening :
public mlir::OpRewritePattern<cir::TernaryOp> {
516 using OpRewritePattern<cir::TernaryOp>::OpRewritePattern;
519 matchAndRewrite(cir::TernaryOp op,
520 mlir::PatternRewriter &rewriter)
const override {
521 Location loc = op->getLoc();
522 Block *condBlock = rewriter.getInsertionBlock();
523 Block::iterator opPosition = rewriter.getInsertionPoint();
524 Block *remainingOpsBlock = rewriter.splitBlock(condBlock, opPosition);
525 llvm::SmallVector<mlir::Location, 2> locs;
528 if (op->getResultTypes().size())
530 Block *continueBlock =
531 rewriter.createBlock(remainingOpsBlock, op->getResultTypes(), locs);
532 cir::BrOp::create(rewriter, loc, remainingOpsBlock);
534 Region &trueRegion = op.getTrueRegion();
535 Block *trueBlock = &trueRegion.front();
536 mlir::Operation *trueTerminator = trueRegion.back().getTerminator();
537 rewriter.setInsertionPointToEnd(&trueRegion.back());
540 if (
auto trueYieldOp = dyn_cast<cir::YieldOp>(trueTerminator)) {
541 rewriter.replaceOpWithNewOp<cir::BrOp>(trueYieldOp, trueYieldOp.getArgs(),
543 }
else if (isa<cir::UnreachableOp>(trueTerminator)) {
546 trueTerminator->emitError(
"unexpected terminator in ternary true region, "
547 "expected yield or unreachable, got: ")
548 << trueTerminator->getName();
549 return mlir::failure();
551 rewriter.inlineRegionBefore(trueRegion, continueBlock);
553 Block *falseBlock = continueBlock;
554 Region &falseRegion = op.getFalseRegion();
556 falseBlock = &falseRegion.front();
557 mlir::Operation *falseTerminator = falseRegion.back().getTerminator();
558 rewriter.setInsertionPointToEnd(&falseRegion.back());
561 if (
auto falseYieldOp = dyn_cast<cir::YieldOp>(falseTerminator)) {
562 rewriter.replaceOpWithNewOp<cir::BrOp>(
563 falseYieldOp, falseYieldOp.getArgs(), continueBlock);
564 }
else if (isa<cir::UnreachableOp>(falseTerminator)) {
567 falseTerminator->emitError(
"unexpected terminator in ternary false "
568 "region, expected yield or unreachable, got: ")
569 << falseTerminator->getName();
570 return mlir::failure();
572 rewriter.inlineRegionBefore(falseRegion, continueBlock);
574 rewriter.setInsertionPointToEnd(condBlock);
575 cir::BrCondOp::create(rewriter, loc, op.getCond(), trueBlock, falseBlock);
577 rewriter.replaceOp(op, continueBlock->getArguments());
580 return mlir::success();
587static cir::AllocaOp getOrCreateCleanupDestSlot(cir::FuncOp funcOp,
588 mlir::PatternRewriter &rewriter,
589 mlir::Location loc) {
590 mlir::Block &entryBlock = funcOp.getBody().front();
593 auto it = llvm::find_if(entryBlock, [](
auto &op) {
594 return mlir::isa<AllocaOp>(&op) &&
595 mlir::cast<AllocaOp>(&op).getCleanupDestSlot();
597 if (it != entryBlock.end())
598 return mlir::cast<cir::AllocaOp>(*it);
601 mlir::OpBuilder::InsertionGuard guard(rewriter);
602 rewriter.setInsertionPointToStart(&entryBlock);
603 cir::IntType s32Type =
604 cir::IntType::get(rewriter.getContext(), 32,
true);
605 cir::PointerType ptrToS32Type = cir::PointerType::get(s32Type);
607 uint64_t alignment = dataLayout.getAlignment(s32Type,
true).value();
608 auto allocaOp = cir::AllocaOp::create(
609 rewriter, loc, ptrToS32Type, s32Type,
"__cleanup_dest_slot",
610 rewriter.getI64IntegerAttr(alignment));
611 allocaOp.setCleanupDestSlot(
true);
623collectThrowingCalls(mlir::Region ®ion,
625 region.walk([&](cir::CallOp callOp) {
626 if (!callOp.getNothrow())
627 callsToRewrite.push_back(callOp);
637static void collectResumeOps(mlir::Region ®ion,
639 region.walk([&](cir::ResumeOp resumeOp) { resumeOps.push_back(resumeOp); });
644static void replaceCallWithTryCall(cir::CallOp callOp, mlir::Block *unwindDest,
646 mlir::PatternRewriter &rewriter) {
647 mlir::Block *callBlock = callOp->getBlock();
649 assert(!callOp.getNothrow() &&
"call is not expected to throw");
653 mlir::Block *normalDest =
654 rewriter.splitBlock(callBlock, std::next(callOp->getIterator()));
657 rewriter.setInsertionPoint(callOp);
658 cir::TryCallOp tryCallOp;
659 if (callOp.isIndirect()) {
660 mlir::Value indTarget = callOp.getIndirectCall();
661 auto ptrTy = mlir::cast<cir::PointerType>(indTarget.getType());
662 auto resTy = mlir::cast<cir::FuncType>(ptrTy.getPointee());
664 cir::TryCallOp::create(rewriter, loc, indTarget, resTy, normalDest,
665 unwindDest, callOp.getArgOperands());
667 mlir::Type resType = callOp->getNumResults() > 0
668 ? callOp->getResult(0).getType()
671 cir::TryCallOp::create(rewriter, loc, callOp.getCalleeAttr(), resType,
672 normalDest, unwindDest, callOp.getArgOperands());
677 llvm::StringRef excludedAttrs[] = {
678 CIRDialect::getCalleeAttrName(),
679 CIRDialect::getOperandSegmentSizesAttrName(),
684 llvm::StringRef unexpectedAttrs[] = {
685 CIRDialect::getNoThrowAttrName(),
686 CIRDialect::getNoUnwindAttrName(),
689 for (mlir::NamedAttribute attr : callOp->getAttrs()) {
690 if (llvm::is_contained(excludedAttrs,
attr.getName()))
692 assert(!llvm::is_contained(unexpectedAttrs,
attr.getName()) &&
693 "unexpected attribute on converted call");
694 tryCallOp->setAttr(
attr.getName(),
attr.getValue());
698 if (callOp->getNumResults() > 0)
699 callOp->getResult(0).replaceAllUsesWith(tryCallOp.getResult());
701 rewriter.eraseOp(callOp);
707static mlir::Block *buildUnwindBlock(mlir::Block *dest,
bool hasCleanup,
709 mlir::Block *insertBefore,
710 mlir::PatternRewriter &rewriter) {
711 mlir::Block *unwindBlock = rewriter.createBlock(insertBefore);
712 rewriter.setInsertionPointToEnd(unwindBlock);
714 cir::EhInitiateOp::create(rewriter, loc, hasCleanup);
715 cir::BrOp::create(rewriter, loc, mlir::ValueRange{ehInitiate.getEhToken()},
// NOTE(review): this class's listing is truncated — many physical lines
// (the CleanupExit struct opener, the destinationId member, lambda/brace
// closers, several conditionals) were elided by the extraction that
// produced this file. Code text below is preserved byte-for-byte; only
// comments were added. Reconstructing the missing lines needs the original
// source and is deliberately not attempted here.
//
// Flattens cir.cleanup_scope: the body region is inlined; every abnormal
// exit out of the body (yield/break/continue/return/goto) is rerouted
// through the cleanup region; for EH cleanups, throwing calls become
// cir.try_call targeting a cloned copy of the cleanup code.
720class CIRCleanupScopeOpFlattening
721 :
public mlir::OpRewritePattern<cir::CleanupScopeOp> {
723 using OpRewritePattern<cir::CleanupScopeOp>::OpRewritePattern;
// One abnormal exit out of the cleanup body: the exiting op plus the
// integer id stored in the cleanup-dest slot to select it afterwards.
728 mlir::Operation *exitOp;
734 CleanupExit(mlir::Operation *op,
int id) : exitOp(op), destinationId(id) {}
// collectExits: records every operation that leaves the cleanup body
// (yield terminators, break/continue/return/goto), assigning each a
// sequential destination id. Nested cleanup scopes, loops and switches are
// handled by dedicated lambdas so only exits that actually cross THIS
// scope's boundary are collected.
753 void collectExits(mlir::Region &cleanupBodyRegion,
754 llvm::SmallVectorImpl<CleanupExit> &exits,
759 for (mlir::Block &block : cleanupBodyRegion) {
760 auto *terminator = block.getTerminator();
761 if (isa<cir::YieldOp>(terminator))
762 exits.emplace_back(terminator, nextId++);
// Inside a nested loop only return/goto escape this scope;
// break/continue bind to the loop itself.
769 auto collectExitsInLoop = [&](mlir::Operation *loopOp) {
770 loopOp->walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *nestedOp) {
771 if (isa<cir::ReturnOp, cir::GotoOp>(nestedOp))
772 exits.emplace_back(nestedOp, nextId++);
773 return mlir::WalkResult::advance();
// Declared up-front (std::function) because the switch and cleanup
// collectors are mutually recursive.
778 std::function<void(mlir::Region &,
bool)> collectExitsInCleanup;
783 collectExitsInSwitch = [&](mlir::Operation *switchOp) {
784 switchOp->walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *nestedOp) {
785 if (isa<cir::CleanupScopeOp>(nestedOp)) {
788 collectExitsInCleanup(
789 cast<cir::CleanupScopeOp>(nestedOp).getBodyRegion(),
791 return mlir::WalkResult::skip();
792 }
else if (isa<cir::LoopOpInterface>(nestedOp)) {
793 collectExitsInLoop(nestedOp);
794 return mlir::WalkResult::skip();
795 }
else if (isa<cir::ReturnOp, cir::GotoOp, cir::ContinueOp>(nestedOp)) {
796 exits.emplace_back(nestedOp, nextId++);
798 return mlir::WalkResult::advance();
// Inside a nested switch, breaks bind to the switch (ignoreBreak).
805 collectExitsInCleanup = [&](mlir::Region &region,
bool ignoreBreak) {
806 region.walk<mlir::WalkOrder::PreOrder>([&](mlir::Operation *op) {
813 if (!ignoreBreak && isa<cir::BreakOp>(op)) {
814 exits.emplace_back(op, nextId++);
815 }
else if (isa<cir::ContinueOp, cir::ReturnOp, cir::GotoOp>(op)) {
816 exits.emplace_back(op, nextId++);
817 }
else if (isa<cir::CleanupScopeOp>(op)) {
819 collectExitsInCleanup(cast<cir::CleanupScopeOp>(op).getBodyRegion(),
821 return mlir::WalkResult::skip();
822 }
else if (isa<cir::LoopOpInterface>(op)) {
826 collectExitsInLoop(op);
827 return mlir::WalkResult::skip();
828 }
else if (isa<cir::SwitchOp>(op)) {
832 collectExitsInSwitch(op);
833 return mlir::WalkResult::skip();
835 return mlir::WalkResult::advance();
840 collectExitsInCleanup(cleanupBodyRegion,
false);
// shouldSinkReturnOperand: true when a return operand (a constant, or a
// single-use load from a function-entry alloca in the same block as the
// return) can simply be moved next to the rebuilt return instead of being
// spilled through a temporary slot.
846 static bool shouldSinkReturnOperand(mlir::Value operand,
847 cir::ReturnOp returnOp) {
849 mlir::Operation *defOp = operand.getDefiningOp();
855 if (!mlir::isa<cir::ConstantOp, cir::LoadOp>(defOp))
859 if (!operand.hasOneUse())
863 if (defOp->getBlock() != returnOp->getBlock())
866 if (
auto loadOp = mlir::dyn_cast<cir::LoadOp>(defOp)) {
868 mlir::Value ptr = loadOp.getAddr();
869 auto funcOp = returnOp->getParentOfType<cir::FuncOp>();
870 assert(funcOp &&
"Return op has no function parent?");
871 mlir::Block &funcEntryBlock = funcOp.getBody().front();
// Only loads from allocas living in the function entry block are safe
// to sink past the cleanup code.
875 mlir::dyn_cast_if_present<cir::AllocaOp>(ptr.getDefiningOp()))
876 return allocaOp->getBlock() == &funcEntryBlock;
882 assert(mlir::isa<cir::ConstantOp>(defOp) &&
"Expected constant op");
// getReturnOpOperands: materializes operands for a return rerouted through
// the cleanup. Sinkable operands are moved into the destination block;
// everything else is spilled to an entry-block temporary at the original
// exit point and reloaded in the destination block.
891 getReturnOpOperands(cir::ReturnOp returnOp, mlir::Operation *exitOp,
892 mlir::Location loc, mlir::PatternRewriter &rewriter,
893 llvm::SmallVectorImpl<mlir::Value> &returnValues)
const {
894 mlir::Block *destBlock = rewriter.getInsertionBlock();
895 auto funcOp = exitOp->getParentOfType<cir::FuncOp>();
896 assert(funcOp &&
"Return op has no function parent?");
897 mlir::Block &funcEntryBlock = funcOp.getBody().front();
899 for (mlir::Value operand : returnOp.getOperands()) {
900 if (shouldSinkReturnOperand(operand, returnOp)) {
902 mlir::Operation *defOp = operand.getDefiningOp();
903 defOp->moveBefore(destBlock, destBlock->end());
904 returnValues.push_back(operand);
// Spill path: alloca in the entry block, store at the exit, reload in
// the destination block.
907 cir::AllocaOp alloca;
909 mlir::OpBuilder::InsertionGuard guard(rewriter);
910 rewriter.setInsertionPointToStart(&funcEntryBlock);
911 cir::CIRDataLayout dataLayout(
912 funcOp->getParentOfType<mlir::ModuleOp>());
914 dataLayout.getAlignment(operand.getType(),
true).value();
915 cir::PointerType ptrType = cir::PointerType::get(operand.getType());
916 alloca = cir::AllocaOp::create(rewriter, loc, ptrType,
917 operand.getType(),
"__ret_operand_tmp",
918 rewriter.getI64IntegerAttr(alignment));
923 mlir::OpBuilder::InsertionGuard guard(rewriter);
924 rewriter.setInsertionPoint(exitOp);
925 cir::StoreOp::create(rewriter, loc, operand, alloca,
928 cir::SyncScopeKindAttr(), cir::MemOrderAttr());
932 rewriter.setInsertionPointToEnd(destBlock);
933 auto loaded = cir::LoadOp::create(
934 rewriter, loc, alloca,
false,
935 false, mlir::IntegerAttr(),
936 cir::SyncScopeKindAttr(), cir::MemOrderAttr());
937 returnValues.push_back(loaded);
// createExitTerminator: after the cleanup has run, re-emit the original
// abnormal exit: yield -> branch to continueBlock; break/continue/return
// are recreated in place; goto is not implemented yet and fails.
947 createExitTerminator(mlir::Operation *exitOp, mlir::Location loc,
948 mlir::Block *continueBlock,
949 mlir::PatternRewriter &rewriter)
const {
950 return llvm::TypeSwitch<mlir::Operation *, mlir::LogicalResult>(exitOp)
951 .Case<cir::YieldOp>([&](
auto) {
953 cir::BrOp::create(rewriter, loc, continueBlock);
954 return mlir::success();
956 .Case<cir::BreakOp>([&](
auto) {
958 cir::BreakOp::create(rewriter, loc);
959 return mlir::success();
961 .Case<cir::ContinueOp>([&](
auto) {
963 cir::ContinueOp::create(rewriter, loc);
964 return mlir::success();
966 .Case<cir::ReturnOp>([&](
auto returnOp) {
970 if (returnOp.hasOperand()) {
971 llvm::SmallVector<mlir::Value, 2> returnValues;
972 getReturnOpOperands(returnOp, exitOp, loc, rewriter, returnValues);
973 cir::ReturnOp::create(rewriter, loc, returnValues);
975 cir::ReturnOp::create(rewriter, loc);
977 return mlir::success();
979 .Case<cir::GotoOp>([&](
auto gotoOp) {
986 cir::UnreachableOp::create(rewriter, loc);
987 return gotoOp.emitError(
988 "goto in cleanup scope is not yet implemented");
990 .
Default([&](mlir::Operation *op) {
991 cir::UnreachableOp::create(rewriter, loc);
992 return op->emitError(
993 "unexpected exit operation in cleanup scope body");
// regionExitsOnlyFromLastBlock: assertion helper verifying that only the
// final block of the cleanup region leaves the region; intra-region
// branches are fine, region-exiting terminators elsewhere are not.
999 static bool regionExitsOnlyFromLastBlock(mlir::Region &region) {
1000 for (mlir::Block &block : region) {
1001 if (&block == &region.back())
1003 bool expectedTerminator =
1004 llvm::TypeSwitch<mlir::Operation *, bool>(block.getTerminator())
1011 .Case<cir::YieldOp, cir::ReturnOp, cir::ResumeFlatOp,
1012 cir::ContinueOp, cir::BreakOp, cir::GotoOp>(
1013 [](
auto) {
return false; })
1022 .Case<cir::TryCallOp>([](
auto) {
return false; })
1026 .Case<cir::EhDispatchOp>([](
auto) {
return false; })
1030 .Case<cir::SwitchFlatOp>([](
auto) {
return false; })
1033 .Case<cir::UnreachableOp, cir::TrapOp>([](
auto) {
return true; })
1035 .Case<cir::IndirectBrOp>([](
auto) {
return false; })
1038 .Case<cir::BrOp>([&](cir::BrOp brOp) {
1039 assert(brOp.getDest()->getParent() == &region &&
1040 "branch destination is not in the region");
1043 .Case<cir::BrCondOp>([&](cir::BrCondOp brCondOp) {
1044 assert(brCondOp.getDestTrue()->getParent() == &region &&
1045 "branch destination is not in the region");
1046 assert(brCondOp.getDestFalse()->getParent() == &region &&
1047 "branch destination is not in the region");
1051 .
Default([](mlir::Operation *) ->
bool {
1052 llvm_unreachable(
"unexpected terminator in cleanup region");
1054 if (!expectedTerminator)
// buildEHCleanupBlocks: clones the cleanup region before `insertBefore`
// for the exception path, threads the EH token through a new entry block
// argument, brackets the code with cir.begin_cleanup/cir.end_cleanup, and
// replaces the final yield with cir.resume. Returns the cloned entry.
1082 mlir::Block *buildEHCleanupBlocks(cir::CleanupScopeOp cleanupOp,
1084 mlir::Block *insertBefore,
1085 mlir::PatternRewriter &rewriter)
const {
1086 assert(regionExitsOnlyFromLastBlock(cleanupOp.getCleanupRegion()) &&
1087 "cleanup region has exits in non-final blocks");
1091 mlir::Block *blockBeforeClone =
insertBefore->getPrevNode();
1094 rewriter.cloneRegionBefore(cleanupOp.getCleanupRegion(), insertBefore);
1097 mlir::Block *clonedEntry = blockBeforeClone
1098 ? blockBeforeClone->getNextNode()
1103 auto ehTokenType = cir::EhTokenType::get(rewriter.getContext());
1104 mlir::Value ehToken = clonedEntry->addArgument(ehTokenType, loc);
1106 rewriter.setInsertionPointToStart(clonedEntry);
1107 auto beginCleanup = cir::BeginCleanupOp::create(rewriter, loc, ehToken);
1111 mlir::Block *lastClonedBlock =
insertBefore->getPrevNode();
1113 mlir::dyn_cast<cir::YieldOp>(lastClonedBlock->getTerminator());
1115 rewriter.setInsertionPoint(yieldOp);
1116 cir::EndCleanupOp::create(rewriter, loc, beginCleanup.getCleanupToken());
1117 rewriter.replaceOpWithNewOp<cir::ResumeOp>(yieldOp, ehToken);
1119 cleanupOp->emitError(
"Not yet implemented: cleanup region terminated "
1120 "with non-yield operation");
// flattenCleanup: the main lowering. Builds the EH clone/unwind blocks if
// needed, inlines body and (for normal cleanups) cleanup regions, then
// routes exits: multi-exit scopes store a destination id in the shared
// slot and dispatch with cir.switch.flat after the cleanup; single-exit
// scopes branch directly. Finally, throwing calls become try_calls and
// nested resumes are chained into the EH cleanup entry.
1149 flattenCleanup(cir::CleanupScopeOp cleanupOp,
1150 llvm::SmallVectorImpl<CleanupExit> &exits,
1151 llvm::SmallVectorImpl<cir::CallOp> &callsToRewrite,
1152 llvm::SmallVectorImpl<cir::ResumeOp> &resumeOpsToChain,
1153 mlir::PatternRewriter &rewriter)
const {
1154 mlir::Location loc = cleanupOp.getLoc();
1155 cir::CleanupKind cleanupKind = cleanupOp.getCleanupKind();
1156 bool hasNormalCleanup = cleanupKind == cir::CleanupKind::Normal ||
1157 cleanupKind == cir::CleanupKind::All;
1158 bool hasEHCleanup = cleanupKind == cir::CleanupKind::EH ||
1159 cleanupKind == cir::CleanupKind::All;
1160 bool isMultiExit = exits.size() > 1;
1163 mlir::Block *bodyEntry = &cleanupOp.getBodyRegion().front();
1164 mlir::Block *cleanupEntry = &cleanupOp.getCleanupRegion().front();
1165 mlir::Block *cleanupExit = &cleanupOp.getCleanupRegion().back();
1166 assert(regionExitsOnlyFromLastBlock(cleanupOp.getCleanupRegion()) &&
1167 "cleanup region has exits in non-final blocks");
1168 auto cleanupYield = dyn_cast<cir::YieldOp>(cleanupExit->getTerminator());
1169 if (!cleanupYield) {
1170 return rewriter.notifyMatchFailure(cleanupOp,
1171 "Not yet implemented: cleanup region "
1172 "terminated with non-yield operation");
// Multi-exit normal cleanups need the shared destination-id slot.
1179 cir::AllocaOp destSlot;
1180 if (isMultiExit && hasNormalCleanup) {
1181 auto funcOp = cleanupOp->getParentOfType<cir::FuncOp>();
1183 return cleanupOp->emitError(
"cleanup scope not inside a function");
1184 destSlot = getOrCreateCleanupDestSlot(funcOp, rewriter, loc);
1188 mlir::Block *currentBlock = rewriter.getInsertionBlock();
1189 mlir::Block *continueBlock =
1190 rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
// EH path: cloned cleanup blocks and, for throwing calls, an unwind
// block feeding them.
1200 mlir::Block *unwindBlock =
nullptr;
1201 mlir::Block *ehCleanupEntry =
nullptr;
1203 (!callsToRewrite.empty() || !resumeOpsToChain.empty())) {
1205 buildEHCleanupBlocks(cleanupOp, loc, continueBlock, rewriter);
1209 if (!callsToRewrite.empty())
1210 unwindBlock = buildUnwindBlock(ehCleanupEntry,
true, loc,
1211 ehCleanupEntry, rewriter);
// Normal-path blocks are inlined before the unwind/EH blocks so the
// layout stays body -> cleanup -> EH -> continue.
1218 mlir::Block *normalInsertPt =
1219 unwindBlock ? unwindBlock
1220 : (ehCleanupEntry ? ehCleanupEntry : continueBlock);
1223 rewriter.inlineRegionBefore(cleanupOp.getBodyRegion(), normalInsertPt);
1226 if (hasNormalCleanup)
1227 rewriter.inlineRegionBefore(cleanupOp.getCleanupRegion(), normalInsertPt);
1230 rewriter.setInsertionPointToEnd(currentBlock);
1231 cir::BrOp::create(rewriter, loc, bodyEntry);
1234 mlir::LogicalResult result = mlir::success();
1235 if (hasNormalCleanup) {
1237 mlir::Block *exitBlock = rewriter.createBlock(normalInsertPt);
1240 rewriter.setInsertionPoint(cleanupYield);
1241 rewriter.replaceOpWithNewOp<cir::BrOp>(cleanupYield, exitBlock);
1245 rewriter.setInsertionPointToEnd(exitBlock);
// Multi-exit: reload the destination id and dispatch to the
// per-exit blocks built below.
1248 auto slotValue = cir::LoadOp::create(
1249 rewriter, loc, destSlot,
false,
1250 false, mlir::IntegerAttr(),
1251 cir::SyncScopeKindAttr(), cir::MemOrderAttr());
1254 llvm::SmallVector<mlir::APInt, 8> caseValues;
1255 llvm::SmallVector<mlir::Block *, 8> caseDestinations;
1256 llvm::SmallVector<mlir::ValueRange, 8> caseOperands;
1257 cir::IntType s32Type =
1258 cir::IntType::get(rewriter.getContext(), 32,
true);
1260 for (
const CleanupExit &exit : exits) {
1262 mlir::Block *destBlock = rewriter.createBlock(normalInsertPt);
1263 rewriter.setInsertionPointToEnd(destBlock);
1265 createExitTerminator(exit.exitOp, loc, continueBlock, rewriter);
1268 caseValues.push_back(
1269 llvm::APInt(32,
static_cast<uint64_t>(exit.destinationId),
true));
1270 caseDestinations.push_back(destBlock);
1271 caseOperands.push_back(mlir::ValueRange());
// The original exit stores its id and branches into the cleanup.
1275 rewriter.setInsertionPoint(exit.exitOp);
1276 auto destIdConst = cir::ConstantOp::create(
1277 rewriter, loc, cir::IntAttr::get(s32Type, exit.destinationId));
1278 cir::StoreOp::create(rewriter, loc, destIdConst, destSlot,
1280 mlir::IntegerAttr(),
1281 cir::SyncScopeKindAttr(), cir::MemOrderAttr());
1282 rewriter.replaceOpWithNewOp<cir::BrOp>(exit.exitOp, cleanupEntry);
1290 if (result.failed())
// Unmatched ids are impossible: the default is unreachable.
1295 mlir::Block *defaultBlock = rewriter.createBlock(normalInsertPt);
1296 rewriter.setInsertionPointToEnd(defaultBlock);
1297 cir::UnreachableOp::create(rewriter, loc);
1300 rewriter.setInsertionPointToEnd(exitBlock);
1301 cir::SwitchFlatOp::create(rewriter, loc, slotValue, defaultBlock,
1302 mlir::ValueRange(), caseValues,
1303 caseDestinations, caseOperands);
// Single exit: no slot/dispatch needed, terminate directly.
1307 rewriter.setInsertionPointToEnd(exitBlock);
1308 mlir::Operation *exitOp = exits[0].exitOp;
1309 result = createExitTerminator(exitOp, loc, continueBlock, rewriter);
1312 rewriter.setInsertionPoint(exitOp);
1313 rewriter.replaceOpWithNewOp<cir::BrOp>(exitOp, cleanupEntry);
// EH-only cleanup: normal exits bypass the cleanup entirely.
1318 for (CleanupExit &exit : exits) {
1319 if (isa<cir::YieldOp>(exit.exitOp)) {
1320 rewriter.setInsertionPoint(exit.exitOp);
1321 rewriter.replaceOpWithNewOp<cir::BrOp>(exit.exitOp, continueBlock);
// Throwing calls now unwind into the cloned cleanup blocks.
1331 for (cir::CallOp callOp : callsToRewrite)
1332 replaceCallWithTryCall(callOp, unwindBlock, loc, rewriter);
// Chain resumes from inner (already flattened) cleanups into ours.
1340 if (ehCleanupEntry) {
1341 for (cir::ResumeOp resumeOp : resumeOpsToChain) {
1342 mlir::Value ehToken = resumeOp.getEhToken();
1343 rewriter.setInsertionPoint(resumeOp);
1344 rewriter.replaceOpWithNewOp<cir::BrOp>(
1345 resumeOp, mlir::ValueRange{ehToken}, ehCleanupEntry);
1350 rewriter.eraseOp(cleanupOp);
// matchAndRewrite: defers until no cleanup scopes / try ops remain nested
// in the body (innermost-first flattening), rejects throwing calls inside
// the cleanup region (not yet implemented), then collects exits and
// throwing calls/resumes and delegates to flattenCleanup.
1356 matchAndRewrite(cir::CleanupScopeOp cleanupOp,
1357 mlir::PatternRewriter &rewriter)
const override {
1358 mlir::OpBuilder::InsertionGuard guard(rewriter);
1363 bool hasNestedOps = cleanupOp.getBodyRegion()
1364 .walk([&](mlir::Operation *op) {
1365 if (isa<cir::CleanupScopeOp, cir::TryOp>(op))
1366 return mlir::WalkResult::interrupt();
1367 return mlir::WalkResult::advance();
1371 return mlir::failure();
1373 cir::CleanupKind cleanupKind = cleanupOp.getCleanupKind();
1378 if (cleanupKind != cir::CleanupKind::Normal) {
1379 llvm::SmallVector<cir::CallOp> cleanupThrowingCalls;
1380 collectThrowingCalls(cleanupOp.getCleanupRegion(), cleanupThrowingCalls);
1381 if (!cleanupThrowingCalls.empty())
1382 return cleanupOp->emitError(
"throwing calls in cleanup region are not yet implemented");
1387 llvm::SmallVector<CleanupExit> exits;
1389 collectExits(cleanupOp.getBodyRegion(), exits, nextId);
1391 assert(!exits.empty() &&
"cleanup scope body has no exit");
1396 llvm::SmallVector<cir::CallOp> callsToRewrite;
1397 if (cleanupKind != cir::CleanupKind::Normal)
1398 collectThrowingCalls(cleanupOp.getBodyRegion(), callsToRewrite);
1402 llvm::SmallVector<cir::ResumeOp> resumeOpsToChain;
1403 if (cleanupKind != cir::CleanupKind::Normal)
1404 collectResumeOps(cleanupOp.getBodyRegion(), resumeOpsToChain);
1406 return flattenCleanup(cleanupOp, exits, callsToRewrite, resumeOpsToChain,
// Rewrite pattern that lowers cir.try (body + catch/unwind handler regions)
// into an explicit CFG with eh-token-carrying dispatch blocks.
1411class CIRTryOpFlattening :
public mlir::OpRewritePattern<cir::TryOp> {
1413 using OpRewritePattern<cir::TryOp>::OpRewritePattern;
// Builds the dispatch block for a flattened cir.try: a new block taking an
// !cir.eh_token argument that ends in a cir.eh_dispatch routing the token to
// the typed catch handlers, with a catch_all or unwind handler as the default.
// Returns the new dispatch block.
1418 mlir::Block *buildCatchDispatchBlock(
1419 cir::TryOp tryOp, mlir::ArrayAttr handlerTypes,
1420 llvm::SmallVectorImpl<mlir::Block *> &catchHandlerBlocks,
1421 mlir::Location loc, mlir::Block *insertBefore,
1422 mlir::PatternRewriter &rewriter)
const {
1423 mlir::Block *dispatchBlock = rewriter.createBlock(insertBefore);
// The dispatch block receives the in-flight exception as a block argument.
1424 auto ehTokenType = cir::EhTokenType::get(rewriter.getContext());
1425 mlir::Value ehToken = dispatchBlock->addArgument(ehTokenType, loc);
1427 rewriter.setInsertionPointToEnd(dispatchBlock);
// Partition handlers: typed catches become (type attr, dest) pairs; the
// single catch_all or unwind handler becomes the default destination.
1430 llvm::SmallVector<mlir::Attribute> catchTypeAttrs;
1431 llvm::SmallVector<mlir::Block *> catchDests;
1432 mlir::Block *defaultDest =
nullptr;
1433 bool defaultIsCatchAll =
false;
1435 for (
auto [typeAttr, handlerBlock] :
1436 llvm::zip(handlerTypes, catchHandlerBlocks)) {
1437 if (mlir::isa<cir::CatchAllAttr>(typeAttr)) {
1438 assert(!defaultDest &&
"multiple catch_all or unwind handlers");
1439 defaultDest = handlerBlock;
1440 defaultIsCatchAll =
true;
1441 }
else if (mlir::isa<cir::UnwindAttr>(typeAttr)) {
1442 assert(!defaultDest &&
"multiple catch_all or unwind handlers");
1443 defaultDest = handlerBlock;
1444 defaultIsCatchAll =
false;
1447 catchTypeAttrs.push_back(typeAttr);
1448 catchDests.push_back(handlerBlock);
1452 assert(defaultDest &&
"dispatch must have a catch_all or unwind handler");
// Leave the types attr null when there are no typed catches.
1454 mlir::ArrayAttr catchTypesArrayAttr;
1455 if (!catchTypeAttrs.empty())
1456 catchTypesArrayAttr = rewriter.getArrayAttr(catchTypeAttrs);
1458 cir::EhDispatchOp::create(rewriter, loc, ehToken, catchTypesArrayAttr,
1459 defaultIsCatchAll, defaultDest, catchDests);
1461 return dispatchBlock;
// Inlines one catch-handler region before `insertBefore` and replaces its
// terminating cir.yield ops with branches to `continueBlock`. Returns the
// handler's entry block (used as a dispatch destination).
// NOTE(review): the extraction elided some lines here (e.g. the end of the
// make_range and the lambda header of the assert), so read alongside the
// full source.
1478 mlir::Block *flattenCatchHandler(mlir::Region &handlerRegion,
1479 mlir::Block *continueBlock,
1481 mlir::Block *insertBefore,
1482 mlir::PatternRewriter &rewriter)
const {
1484 mlir::Block *handlerEntry = &handlerRegion.front();
1487 rewriter.inlineRegionBefore(handlerRegion, insertBefore);
1490 for (mlir::Block &block : llvm::make_range(handlerEntry->getIterator(),
1492 if (
auto yieldOp = dyn_cast<cir::YieldOp>(block.getTerminator())) {
// Sanity check (debug builds only): the op right before the yield must be
// cir.end_catch — either in this block, or found by walking back through
// single-predecessor blocks that end in plain branches.
1501 if (mlir::Operation *prev = yieldOp->getPrevNode())
1502 return isa<cir::EndCatchOp>(prev);
1505 mlir::Block *
b = block.getSinglePredecessor();
1507 mlir::Operation *term =
b->getTerminator();
1508 if (mlir::Operation *prev = term->getPrevNode())
1509 return isa<cir::EndCatchOp>(prev);
1510 if (!isa<cir::BrOp>(term))
1512 b =
b->getSinglePredecessor();
1515 }() &&
"expected end_catch as last operation before yield "
1516 "in catch handler, with only branches in between");
// The yield becomes a fallthrough branch to the code after the try.
1517 rewriter.setInsertionPoint(yieldOp);
1518 rewriter.replaceOpWithNewOp<cir::BrOp>(yieldOp, continueBlock);
1522 return handlerEntry;
// Inlines the unwind-handler region before `insertBefore`.
// NOTE(review): the rest of this function (presumably fixing up the entry
// block / terminator and returning it) is elided by the extraction — the
// caller uses its result as the unwind dispatch destination; confirm against
// the full source.
1531 mlir::Block *flattenUnwindHandler(mlir::Region &unwindRegion,
1533 mlir::Block *insertBefore,
1534 mlir::PatternRewriter &rewriter)
const {
1535 mlir::Block *unwindEntry = &unwindRegion.front();
1536 rewriter.inlineRegionBefore(unwindRegion, insertBefore);
// Flattens a cir.try: inlines the try body into the surrounding CFG, turns
// each handler region into plain blocks, and wires throwing calls / resumes
// into an eh-dispatch block.
// NOTE(review): this listing has gaps (original source lines elided), e.g.
// the receiver of the ->walk at the top and several closing braces.
1541 matchAndRewrite(cir::TryOp tryOp,
1542 mlir::PatternRewriter &rewriter)
const override {
// Defer while any other cleanup_scope or try is nested inside: inner
// constructs are flattened first.
1548 ->walk([&](mlir::Operation *op) {
1549 if (isa<cir::CleanupScopeOp, cir::TryOp>(op) && op != tryOp)
1550 return mlir::WalkResult::interrupt();
1551 return mlir::WalkResult::advance();
1555 return mlir::failure();
1557 mlir::OpBuilder::InsertionGuard guard(rewriter);
1558 mlir::Location loc = tryOp.getLoc();
1560 mlir::ArrayAttr handlerTypes = tryOp.getHandlerTypesAttr();
1561 mlir::MutableArrayRef<mlir::Region> handlerRegions =
1562 tryOp.getHandlerRegions();
// Calls that can throw and cir.resume ops inside the try body are the only
// edges that can reach the handlers.
1565 llvm::SmallVector<cir::CallOp> callsToRewrite;
1566 collectThrowingCalls(tryOp.getTryRegion(), callsToRewrite);
1569 llvm::SmallVector<cir::ResumeOp> resumeOpsToChain;
1570 collectResumeOps(tryOp.getTryRegion(), resumeOpsToChain);
// Split the current block at the try: everything after it becomes the
// continuation, and the body region is inlined in between.
1573 mlir::Block *currentBlock = rewriter.getInsertionBlock();
1574 mlir::Block *continueBlock =
1575 rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
1578 mlir::Block *bodyEntry = &tryOp.getTryRegion().front();
1579 mlir::Block *bodyExit = &tryOp.getTryRegion().back();
1582 rewriter.inlineRegionBefore(tryOp.getTryRegion(), continueBlock);
1585 rewriter.setInsertionPointToEnd(currentBlock);
1586 cir::BrOp::create(rewriter, loc, bodyEntry);
// A normal fallthrough out of the body becomes a branch past the handlers.
1589 if (
auto bodyYield = dyn_cast<cir::YieldOp>(bodyExit->getTerminator())) {
1590 rewriter.setInsertionPoint(bodyYield);
1591 rewriter.replaceOpWithNewOp<cir::BrOp>(bodyYield, continueBlock);
// No handlers at all: the body alone is the lowering.
1595 if (!handlerTypes || handlerTypes.empty()) {
1596 rewriter.eraseOp(tryOp);
1597 return mlir::success();
// Handlers exist but nothing in the body can reach them: they are dead.
1604 if (callsToRewrite.empty() && resumeOpsToChain.empty()) {
1605 rewriter.eraseOp(tryOp);
1606 return mlir::success();
// Flatten each handler region; unwind handlers and typed/catch_all handlers
// use separate helpers. Order here must match handlerTypes, since the two
// are zipped when building the dispatch block.
1612 llvm::SmallVector<mlir::Block *> catchHandlerBlocks;
1614 for (
const auto &[idx, typeAttr] : llvm::enumerate(handlerTypes)) {
1615 mlir::Region &handlerRegion = handlerRegions[idx];
1617 if (mlir::isa<cir::UnwindAttr>(typeAttr)) {
1618 mlir::Block *unwindEntry =
1619 flattenUnwindHandler(handlerRegion, loc, continueBlock, rewriter);
1620 catchHandlerBlocks.push_back(unwindEntry);
1622 mlir::Block *handlerEntry = flattenCatchHandler(
1623 handlerRegion, continueBlock, loc, continueBlock, rewriter);
1624 catchHandlerBlocks.push_back(handlerEntry);
1629 mlir::Block *dispatchBlock =
1630 buildCatchDispatchBlock(tryOp, handlerTypes, catchHandlerBlocks, loc,
1631 catchHandlerBlocks.front(), rewriter);
// Throwing calls become try-calls whose unwind edge lands in a block that
// forwards the eh token to the dispatch block.
1639 bool hasCleanup = tryOp.getCleanup();
1640 if (!callsToRewrite.empty()) {
1642 mlir::Block *unwindBlock = buildUnwindBlock(dispatchBlock, hasCleanup,
1643 loc, dispatchBlock, rewriter);
1645 for (cir::CallOp callOp : callsToRewrite)
1646 replaceCallWithTryCall(callOp, unwindBlock, loc, rewriter);
// cir.resume ops re-enter dispatch directly, carrying their eh token.
1652 for (cir::ResumeOp resumeOp : resumeOpsToChain) {
1653 mlir::Value ehToken = resumeOp.getEhToken();
1654 rewriter.setInsertionPoint(resumeOp);
1655 rewriter.replaceOpWithNewOp<cir::BrOp>(
1656 resumeOp, mlir::ValueRange{ehToken}, dispatchBlock);
1660 rewriter.eraseOp(tryOp);
1662 return mlir::success();
// Registers all CFG-flattening rewrite patterns into `patterns`.
// NOTE(review): the line between the signature and `.add` (the `patterns`
// receiver) is elided by the extraction.
1666void populateFlattenCFGPatterns(RewritePatternSet &patterns) {
1668 .add<CIRIfFlattening, CIRLoopOpInterfaceFlattening, CIRScopeOpFlattening,
1669 CIRSwitchOpFlattening, CIRTernaryOpFlattening,
1670 CIRCleanupScopeOpFlattening, CIRTryOpFlattening>(
1671 patterns.getContext());
// Pass entry point: collects the structured-control-flow ops (post-order, so
// inner ops are listed before the ops that enclose them) and applies the
// flattening patterns greedily to just that set.
// NOTE(review): the extraction elided lines inside the walk callback (the
// push_back and the lambda's close).
1674void CIRFlattenCFGPass::runOnOperation() {
1675 RewritePatternSet patterns(&getContext());
1676 populateFlattenCFGPatterns(patterns);
1679 llvm::SmallVector<Operation *, 16> ops;
1680 getOperation()->walk<mlir::WalkOrder::PostOrder>([&](Operation *op) {
1681 if (isa<IfOp, ScopeOp, SwitchOp, LoopOpInterface, TernaryOp, CleanupScopeOp,
1687 if (applyOpPatternsGreedily(ops, std::move(patterns)).failed())
1688 signalPassFailure();
1696 return std::make_unique<CIRFlattenCFGPass>();
const internal::VariadicAllOfMatcher< Attr > attr
std::unique_ptr< Pass > createCIRFlattenCFGPass()
int const char * function
float __ovld __cnfn step(float, float)
Returns 0.0 if x < edge, otherwise it returns 1.0.
static bool stackSaveOp()