clang 23.0.0git
EHABILowering.cpp
Go to the documentation of this file.
1//===- EHABILowering.cpp - Lower flattened CIR EH ops to ABI-specific form ===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements a pass that lowers ABI-agnostic flattened CIR exception
10// handling operations into an ABI-specific form. Currently only the Itanium
11// C++ ABI is supported.
12//
13// The Itanium ABI lowering performs these transformations:
14// - cir.eh.initiate → cir.eh.inflight_exception (landing pad)
15// - cir.eh.dispatch → cir.eh.typeid + cir.cmp + cir.brcond chains
16// - cir.begin_cleanup → (removed)
17// - cir.end_cleanup → (removed)
18// - cir.begin_catch → call to __cxa_begin_catch
19// - cir.end_catch → call to __cxa_end_catch
20// - cir.eh.terminate → call to __clang_call_terminate + unreachable
21// - cir.resume → cir.resume.flat
22// - !cir.eh_token values → (!cir.ptr<!void>, !u32i) value pairs
23// - cir.construct_catch_param → __cxa_get_exception_ptr + inlined
24// catch-copy thunk body
25// - personality function set on functions requiring EH
26//
27//===----------------------------------------------------------------------===//
28
29#include "CIRTransformUtils.h"
30#include "PassDetail.h"
31#include "mlir/IR/Builders.h"
32#include "mlir/IR/IRMapping.h"
33#include "mlir/IR/PatternMatch.h"
38#include "llvm/ADT/DenseMap.h"
39#include "llvm/ADT/SmallVector.h"
40#include "llvm/TargetParser/Triple.h"
41
42using namespace mlir;
43using namespace cir;
44
45namespace mlir {
46#define GEN_PASS_DEF_CIREHABILOWERING
47#include "clang/CIR/Dialect/Passes.h.inc"
48} // namespace mlir
49
50namespace {
51
52//===----------------------------------------------------------------------===//
53// Shared utilities
54//===----------------------------------------------------------------------===//
55
56/// Ensure a function with the given name and type exists in the module. If it
57/// does not exist, create a private external declaration.
58static cir::FuncOp getOrCreateRuntimeFuncDecl(mlir::ModuleOp mod,
59 mlir::Location loc,
60 StringRef name,
61 cir::FuncType funcTy) {
62 if (auto existing = mod.lookupSymbol<cir::FuncOp>(name))
63 return existing;
64
65 mlir::OpBuilder builder(mod.getContext());
66 builder.setInsertionPointToEnd(mod.getBody());
67 auto funcOp = cir::FuncOp::create(builder, loc, name, funcTy);
68 funcOp.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
69 funcOp.setPrivate();
70 return funcOp;
71}
72
73//===----------------------------------------------------------------------===//
74// EH ABI Lowering Base Class
75//===----------------------------------------------------------------------===//
76
/// Abstract base class for exception-handling ABI lowering.
/// Each supported ABI (Itanium, Microsoft, etc.) provides a concrete subclass.
class EHABILowering {
public:
  /// Construct a lowering driver for \p mod. The builder is created on the
  /// module's context; subclasses reposition its insertion point as needed.
  explicit EHABILowering(mlir::ModuleOp mod)
      : mod(mod), ctx(mod.getContext()), builder(ctx) {}
  virtual ~EHABILowering() = default;

  /// Lower all EH operations in the module to an ABI-specific form.
  /// Returns failure if any operation could not be lowered.
  virtual mlir::LogicalResult run() = 0;

protected:
  mlir::ModuleOp mod;      // Module whose functions are being lowered.
  mlir::MLIRContext *ctx;  // Context of `mod`; owns created types/attrs.
  mlir::OpBuilder builder; // Shared builder; insertion point set per use.
};
93
94//===----------------------------------------------------------------------===//
95// Itanium EH ABI Lowering
96//===----------------------------------------------------------------------===//
97
/// Lowers flattened CIR EH operations to the Itanium C++ ABI form.
///
/// The entry point is run(), which iterates over all functions and
/// calls lowerFunc() for each. lowerFunc() drives all lowering from
/// cir.eh.initiate operations: every other EH op (begin/end_cleanup,
/// eh.dispatch, begin/end_catch, resume) is reachable by tracing the
/// eh_token produced by the initiate through its users.
class ItaniumEHLowering : public EHABILowering {
public:
  using EHABILowering::EHABILowering;
  mlir::LogicalResult run() override;

private:
  /// Maps a !cir.eh_token value to its Itanium ABI replacement pair:
  /// an exception pointer (!cir.ptr<!void>) and a type id (!u32i).
  using EhTokenMap = DenseMap<mlir::Value, std::pair<mlir::Value, mlir::Value>>;

  // Common types, pre-computed once in run() and reused by all lowerings.
  cir::VoidType voidType;
  cir::PointerType voidPtrType;
  cir::PointerType u8PtrType;
  cir::IntType u32Type;

  // Cached runtime function declarations, initialized when needed by
  // ensureRuntimeDecls().
  cir::FuncOp personalityFunc;
  cir::FuncOp beginCatchFunc;
  cir::FuncOp endCatchFunc;
  cir::FuncOp getExceptionPtrFunc;
  // Initialized by ensureClangCallTerminate() (declared or defined lazily).
  cir::FuncOp clangCallTerminateFunc;

  // Cache of resolved and validated catch-copy thunk definitions, keyed by
  // symbol name. Populated by resolveCatchCopyThunk().
  DenseMap<mlir::StringAttr, cir::FuncOp> catchCopyThunks;

  // Name of the Itanium C++ personality routine.
  constexpr const static ::llvm::StringLiteral kGxxPersonality =
      "__gxx_personality_v0";

  /// Declare (if missing) the Itanium runtime functions used by the lowering.
  void ensureRuntimeDecls(mlir::Location loc);
  /// Declare or define __clang_call_terminate in the module.
  void ensureClangCallTerminate(mlir::Location loc);
  /// Append a catch-all landing pad that calls __clang_call_terminate.
  mlir::Block *buildTerminateBlock(cir::FuncOp funcOp, mlir::Location loc);
  /// Resolve and validate the catch-copy thunk referenced by \p op.
  mlir::FailureOr<cir::FuncOp>
  resolveCatchCopyThunk(cir::ConstructCatchParamOp op);
  /// Lower all EH operations in one function.
  mlir::LogicalResult lowerFunc(cir::FuncOp funcOp);
  /// Lower one cir.eh.initiate and every EH op reachable from its token.
  mlir::LogicalResult
  lowerEhInitiate(cir::EhInitiateOp initiateOp, EhTokenMap &ehTokenMap,
                  SmallVectorImpl<mlir::Operation *> &deadOps);
  /// Expand a cir.eh.dispatch into a typeid comparison chain.
  void lowerDispatch(cir::EhDispatchOp dispatch, mlir::Value exnPtr,
                     mlir::Value typeId,
                     SmallVectorImpl<mlir::Operation *> &deadOps);
  /// Lower a cir.construct_catch_param (non-trivial copy into the param).
  mlir::LogicalResult lowerConstructCatchParam(cir::ConstructCatchParamOp op,
                                               mlir::Value exnPtr);
  /// Lower a cir.init_catch_param (materialize the catch parameter local).
  void lowerInitCatchParam(cir::InitCatchParamOp op);
};
149
150/// Lower all EH operations in the module to the Itanium-specific form.
151mlir::LogicalResult ItaniumEHLowering::run() {
152 // Pre-compute the common types used throughout all function lowerings.
153 // TODO(cir): Move these to the base class if they are also needed for MSVC.
154 voidType = cir::VoidType::get(ctx);
155 voidPtrType = cir::PointerType::get(voidType);
156 auto u8Type = cir::IntType::get(ctx, 8, /*isSigned=*/false);
157 u8PtrType = cir::PointerType::get(u8Type);
158 u32Type = cir::IntType::get(ctx, 32, /*isSigned=*/false);
159
160 for (cir::FuncOp funcOp : mod.getOps<cir::FuncOp>()) {
161 if (mlir::failed(lowerFunc(funcOp)))
162 return mlir::failure();
163 }
164 return mlir::success();
165}
166
167/// Ensure the necessary Itanium runtime function declarations exist in the
168/// module.
169void ItaniumEHLowering::ensureRuntimeDecls(mlir::Location loc) {
170 // TODO(cir): Handle other personality functions. This probably isn't needed
171 // here if we fix codegen to always set the personality function.
172 if (!personalityFunc) {
173 auto s32Type = cir::IntType::get(ctx, 32, /*isSigned=*/true);
174 auto personalityFuncTy = cir::FuncType::get({}, s32Type, /*isVarArg=*/true);
175 personalityFunc = getOrCreateRuntimeFuncDecl(mod, loc, kGxxPersonality,
176 personalityFuncTy);
177 }
178
179 if (!beginCatchFunc) {
180 auto beginCatchFuncTy =
181 cir::FuncType::get({voidPtrType}, u8PtrType, /*isVarArg=*/false);
182 beginCatchFunc = getOrCreateRuntimeFuncDecl(mod, loc, "__cxa_begin_catch",
183 beginCatchFuncTy);
184 }
185
186 if (!endCatchFunc) {
187 auto endCatchFuncTy = cir::FuncType::get({}, voidType, /*isVarArg=*/false);
188 endCatchFunc =
189 getOrCreateRuntimeFuncDecl(mod, loc, "__cxa_end_catch", endCatchFuncTy);
190 }
191
192 if (!getExceptionPtrFunc) {
193 auto getExceptionPtrFuncTy =
194 cir::FuncType::get({voidPtrType}, u8PtrType, /*isVarArg=*/false);
195 getExceptionPtrFunc = getOrCreateRuntimeFuncDecl(
196 mod, loc, "__cxa_get_exception_ptr", getExceptionPtrFuncTy);
197 }
198}
199
200/// Ensure the __clang_call_terminate function exists in the module. This
201/// function is defined with a body that calls __cxa_begin_catch followed by
202/// std::terminate, matching the behavior of Clang's LLVM IR codegen.
203///
204/// void __clang_call_terminate(void *exn) nounwind noreturn {
205/// __cxa_begin_catch(exn);
206/// std::terminate();
207/// unreachable;
208/// }
209void ItaniumEHLowering::ensureClangCallTerminate(mlir::Location loc) {
210 if (clangCallTerminateFunc)
211 return;
212
213 ensureRuntimeDecls(loc);
214
215 if (auto existing = mod.lookupSymbol<cir::FuncOp>("__clang_call_terminate")) {
216 clangCallTerminateFunc = existing;
217 return;
218 }
219
220 auto funcTy = cir::FuncType::get({voidPtrType}, voidType, /*isVarArg=*/false);
221 builder.setInsertionPointToEnd(mod.getBody());
222 auto funcOp =
223 cir::FuncOp::create(builder, loc, "__clang_call_terminate", funcTy);
224 funcOp.setLinkage(cir::GlobalLinkageKind::LinkOnceODRLinkage);
225 funcOp.setGlobalVisibility(cir::VisibilityKind::Hidden);
226
227 mlir::Block *entryBlock = funcOp.addEntryBlock();
228 builder.setInsertionPointToStart(entryBlock);
229 mlir::Value exnArg = entryBlock->getArgument(0);
230
231 auto catchCall = cir::CallOp::create(
232 builder, loc, mlir::FlatSymbolRefAttr::get(beginCatchFunc), u8PtrType,
233 mlir::ValueRange{exnArg});
234 catchCall.setNothrowAttr(builder.getUnitAttr());
235
236 auto terminateFuncDecl = getOrCreateRuntimeFuncDecl(
237 mod, loc, "_ZSt9terminatev",
238 cir::FuncType::get({}, voidType, /*isVarArg=*/false));
239 terminateFuncDecl->setAttr(cir::CIRDialect::getNoReturnAttrName(),
240 builder.getUnitAttr());
241 auto terminateCall = cir::CallOp::create(
242 builder, loc, mlir::FlatSymbolRefAttr::get(terminateFuncDecl), voidType,
243 mlir::ValueRange{});
244 terminateCall.setNothrowAttr(builder.getUnitAttr());
245 terminateCall->setAttr(cir::CIRDialect::getNoReturnAttrName(),
246 builder.getUnitAttr());
247
248 cir::UnreachableOp::create(builder, loc);
249
250 funcOp->setAttr(cir::CIRDialect::getNoReturnAttrName(),
251 builder.getUnitAttr());
252 clangCallTerminateFunc = funcOp;
253}
254
255/// Create a terminate landing pad block at the end of the specified function.
256mlir::Block *ItaniumEHLowering::buildTerminateBlock(cir::FuncOp funcOp,
257 mlir::Location loc) {
258 assert(clangCallTerminateFunc &&
259 "ensureClangCallTerminate must run before buildTerminateBlock");
260 mlir::Region &body = funcOp.getRegion();
261 mlir::Block *terminateBlock = builder.createBlock(&body, body.end());
262 auto inflight = cir::EhInflightOp::create(
263 builder, loc, /*cleanup=*/false, /*catch_all=*/true,
264 /*catch_type_list=*/mlir::ArrayAttr{});
265 auto terminateCall = cir::CallOp::create(
266 builder, loc, mlir::FlatSymbolRefAttr::get(clangCallTerminateFunc),
267 voidType, mlir::ValueRange{inflight.getExceptionPtr()});
268 terminateCall.setNothrowAttr(builder.getUnitAttr());
269 terminateCall->setAttr(cir::CIRDialect::getNoReturnAttrName(),
270 builder.getUnitAttr());
271 cir::UnreachableOp::create(builder, loc);
272 return terminateBlock;
273}
274
275/// Lower all EH operations in a single function.
276mlir::LogicalResult ItaniumEHLowering::lowerFunc(cir::FuncOp funcOp) {
277 if (funcOp.isDeclaration())
278 return mlir::success();
279
280 // All EH lowering follows from cir.eh.initiate operations. The token each
281 // initiate produces connects it to every other EH op in the function
282 // (begin/end_cleanup, eh.dispatch, begin/end_catch, resume) through the
283 // token graph. A single walk to collect initiates is therefore sufficient.
284 SmallVector<cir::EhInitiateOp> initiateOps;
285 funcOp.walk([&](cir::EhInitiateOp op) { initiateOps.push_back(op); });
286 if (initiateOps.empty())
287 return mlir::success();
288
289 ensureRuntimeDecls(funcOp.getLoc());
290
291 // Set the personality function if it is not already set.
292 // TODO(cir): The personality function should already have been set by this
293 // point. If we've seen a try operation, it will have been set by
294 // emitCXXTryStmt. If we only have cleanups, it may not have been set. We
295 // need to fix that in CodeGen. This is a placeholder until that is done.
296 if (!funcOp.getPersonality())
297 funcOp.setPersonality(kGxxPersonality);
298
299 // Lower each initiate and all EH ops connected to it. The token map is
300 // shared across all initiate operations. Multiple initiates may flow into the
301 // same dispatch block, and the map ensures the arguments are registered
302 // only once. Dispatch ops are scheduled for deferred removal so that sibling
303 // initiates can still read catch types from a shared dispatch.
304 EhTokenMap ehTokenMap;
305 SmallVector<mlir::Operation *> deadOps;
306 for (cir::EhInitiateOp initiateOp : initiateOps)
307 if (mlir::failed(lowerEhInitiate(initiateOp, ehTokenMap, deadOps)))
308 return mlir::failure();
309
310 // Erase operations that were deferred during per-initiate processing
311 // (dispatch ops whose catch types were read by multiple initiates).
312 for (mlir::Operation *op : deadOps)
313 op->erase();
314
315 // Remove the !cir.eh_token block arguments that were replaced by (ptr, u32)
316 // pairs. Iterate in reverse to preserve argument indices during removal.
317 for (mlir::Block &block : funcOp.getBody()) {
318 for (int i = block.getNumArguments() - 1; i >= 0; --i) {
319 if (mlir::isa<cir::EhTokenType>(block.getArgument(i).getType()))
320 block.eraseArgument(i);
321 }
322 }
323
324 // Lower any cir.init_catch_param ops in this function. These materialize
325 // the catch parameter local from the (already lowered) begin_catch result,
326 // and are independent of the eh_token graph traversal above.
327 SmallVector<cir::InitCatchParamOp> initCatchOps;
328 funcOp.walk([&](cir::InitCatchParamOp op) { initCatchOps.push_back(op); });
329 for (cir::InitCatchParamOp op : initCatchOps)
330 lowerInitCatchParam(op);
331
332 return mlir::success();
333}
334
/// Lower all EH operations connected to a single cir.eh.initiate.
///
/// The cir.eh.initiate is the root of a token graph. The token it produces
/// flows through branch edges to consuming operations:
///
///   cir.eh.initiate
///     → (via cir.br) → cir.begin_cleanup
///                        → cir.end_cleanup (via cleanup_token)
///     → (via cir.br) → cir.eh.dispatch
///                        → (successors) → cir.begin_catch
///                                           → cir.end_catch (via catch_token)
///                        → cir.resume
///
/// A single traversal of the token graph discovers and processes every
/// connected op inline. The inflight_exception is created up-front without
/// a catch_type_list; when the dispatch is encountered during traversal,
/// the catch types are read and set on the inflight op.
///
/// Dispatch ops are not erased during per-initiate processing because they may
/// be used by other initiate ops that haven't yet been lowered. Instead they
/// are added to \p deadOps and erased by the caller after all initiates have
/// been lowered.
///
/// \p ehTokenMap is shared across all initiates in the function so that block
/// arguments reachable from multiple sibling initiates are registered once.
mlir::LogicalResult ItaniumEHLowering::lowerEhInitiate(
    cir::EhInitiateOp initiateOp, EhTokenMap &ehTokenMap,
    SmallVectorImpl<mlir::Operation *> &deadOps) {
  mlir::Value rootToken = initiateOp.getEhToken();

  // Create the inflight_exception without a catch_type_list. The catch types
  // will be set once we encounter the dispatch during the traversal below.
  builder.setInsertionPoint(initiateOp);
  auto inflightOp = cir::EhInflightOp::create(
      builder, initiateOp.getLoc(), /*cleanup=*/initiateOp.getCleanup(),
      /*catch_all=*/false,
      /*catch_type_list=*/mlir::ArrayAttr{});

  ehTokenMap[rootToken] = {inflightOp.getExceptionPtr(),
                           inflightOp.getTypeId()};

  // Single traversal of the token graph. For each token value (the root token
  // or a block argument that carries it), we snapshot its users, register
  // (ptr, u32) replacement arguments on successor blocks, then process every
  // user inline. This avoids collecting ops into separate vectors.
  SmallVector<mlir::Value> worklist;
  SmallPtrSet<mlir::Value, 8> visited;
  worklist.push_back(rootToken);

  while (!worklist.empty()) {
    mlir::Value current = worklist.pop_back_val();
    if (!visited.insert(current).second)
      continue;

    // Snapshot users before modifying any of them (erasing ops during
    // iteration would invalidate the use-list iterator).
    SmallVector<mlir::Operation *> users;
    for (mlir::OpOperand &use : current.getUses())
      users.push_back(use.getOwner());

    // Register replacement block arguments on successor blocks (extending the
    // worklist), then lower the op itself.
    for (mlir::Operation *user : users) {
      // Trace into successor blocks to register (ptr, u32) replacement
      // arguments for any !cir.eh_token block arguments found there. Even
      // if a block arg was already registered by a sibling initiate, it is
      // still added to the worklist so that the traversal can reach the
      // shared dispatch to read catch types.
      for (unsigned s = 0; s < user->getNumSuccessors(); ++s) {
        mlir::Block *succ = user->getSuccessor(s);
        for (mlir::BlockArgument arg : succ->getArguments()) {
          if (!mlir::isa<cir::EhTokenType>(arg.getType()))
            continue;
          if (!ehTokenMap.count(arg)) {
            mlir::Value ptrArg = succ->addArgument(voidPtrType, arg.getLoc());
            mlir::Value u32Arg = succ->addArgument(u32Type, arg.getLoc());
            ehTokenMap[arg] = {ptrArg, u32Arg};
          }
          worklist.push_back(arg);
        }
      }

      if (auto op = mlir::dyn_cast<cir::BeginCleanupOp>(user)) {
        // begin_cleanup / end_cleanup are no-ops for Itanium. Erase the
        // end_cleanup first (drops the cleanup_token use) then the begin.
        for (auto &tokenUsers :
             llvm::make_early_inc_range(op.getCleanupToken().getUses())) {
          if (auto endOp =
                  mlir::dyn_cast<cir::EndCleanupOp>(tokenUsers.getOwner()))
            endOp.erase();
        }
        op.erase();
      } else if (auto op = mlir::dyn_cast<cir::BeginCatchOp>(user)) {
        // Replace end_catch → __cxa_end_catch (drops the catch_token use),
        // then replace begin_catch → __cxa_begin_catch.
        for (auto &tokenUsers :
             llvm::make_early_inc_range(op.getCatchToken().getUses())) {
          if (auto endOp =
                  mlir::dyn_cast<cir::EndCatchOp>(tokenUsers.getOwner())) {
            builder.setInsertionPoint(endOp);
            cir::CallOp::create(builder, endOp.getLoc(),
                                mlir::FlatSymbolRefAttr::get(endCatchFunc),
                                voidType, mlir::ValueRange{});
            endOp.erase();
          }
        }

        auto [exnPtr, typeId] = ehTokenMap.lookup(op.getEhToken());
        builder.setInsertionPoint(op);
        auto callOp = cir::CallOp::create(
            builder, op.getLoc(), mlir::FlatSymbolRefAttr::get(beginCatchFunc),
            u8PtrType, mlir::ValueRange{exnPtr});
        // __cxa_begin_catch returns a !u8 pointer; bitcast when the consumer
        // expects a differently-typed pointer.
        mlir::Value castResult = callOp.getResult();
        mlir::Type expectedPtrType = op.getExnPtr().getType();
        if (castResult.getType() != expectedPtrType)
          castResult =
              cir::CastOp::create(builder, op.getLoc(), expectedPtrType,
                                  cir::CastKind::bitcast, callOp.getResult());
        op.getExnPtr().replaceAllUsesWith(castResult);
        op.erase();
      } else if (auto op = mlir::dyn_cast<cir::ConstructCatchParamOp>(user)) {
        auto [exnPtr, typeId] = ehTokenMap.lookup(op.getEhToken());
        if (mlir::failed(lowerConstructCatchParam(op, exnPtr)))
          return mlir::failure();
      } else if (auto op = mlir::dyn_cast<cir::EhDispatchOp>(user)) {
        // Read catch types from the dispatch and set them on the inflight op.
        mlir::ArrayAttr catchTypes = op.getCatchTypesAttr();
        if (catchTypes && catchTypes.size() > 0) {
          SmallVector<mlir::Attribute> typeSymbols;
          for (mlir::Attribute attr : catchTypes)
            typeSymbols.push_back(
                mlir::cast<cir::GlobalViewAttr>(attr).getSymbol());
          inflightOp.setCatchTypeListAttr(builder.getArrayAttr(typeSymbols));
        }
        if (op.getDefaultIsCatchAll())
          inflightOp.setCatchAllAttr(builder.getUnitAttr());
        // Only lower the dispatch once. A sibling initiate sharing the same
        // dispatch will still read its catch types (above), but the comparison
        // chain and branch replacement are only created the first time.
        if (!llvm::is_contained(deadOps, op.getOperation())) {
          auto [exnPtr, typeId] = ehTokenMap.lookup(op.getEhToken());
          lowerDispatch(op, exnPtr, typeId, deadOps);
        }
      } else if (auto op = mlir::dyn_cast<cir::EhTerminateOp>(user)) {
        // eh.terminate → __clang_call_terminate(exnPtr) + unreachable.
        auto [exnPtr, typeId] = ehTokenMap.lookup(op.getEhToken());
        ensureClangCallTerminate(op.getLoc());
        builder.setInsertionPoint(op);
        auto call = cir::CallOp::create(
            builder, op.getLoc(),
            mlir::FlatSymbolRefAttr::get(clangCallTerminateFunc), voidType,
            mlir::ValueRange{exnPtr});
        call.setNothrowAttr(builder.getUnitAttr());
        call->setAttr(cir::CIRDialect::getNoReturnAttrName(),
                      builder.getUnitAttr());
        cir::UnreachableOp::create(builder, op.getLoc());
        op.erase();
      } else if (auto op = mlir::dyn_cast<cir::ResumeOp>(user)) {
        // resume → resume.flat carrying the (ptr, u32) pair explicitly.
        auto [exnPtr, typeId] = ehTokenMap.lookup(op.getEhToken());
        builder.setInsertionPoint(op);
        cir::ResumeFlatOp::create(builder, op.getLoc(), exnPtr, typeId);
        op.erase();
      } else if (auto op = mlir::dyn_cast<cir::BrOp>(user)) {
        // Replace eh_token operands with the (ptr, u32) pair.
        SmallVector<mlir::Value> newOperands;
        bool changed = false;
        for (mlir::Value operand : op.getDestOperands()) {
          auto it = ehTokenMap.find(operand);
          if (it != ehTokenMap.end()) {
            newOperands.push_back(it->second.first);
            newOperands.push_back(it->second.second);
            changed = true;
          } else {
            newOperands.push_back(operand);
          }
        }
        if (changed) {
          builder.setInsertionPoint(op);
          cir::BrOp::create(builder, op.getLoc(), op.getDest(), newOperands);
          op.erase();
        }
      }
    }
  }

  initiateOp.erase();
  return mlir::success();
}
523
/// Lower a cir.eh.dispatch by creating a comparison chain in new blocks.
/// The dispatch itself is replaced with a branch to the first comparison
/// block and added to deadOps for deferred removal.
///
/// Each comparison block receives the (exception ptr, type id) pair as block
/// arguments, compares the type id against the cir.eh.typeid of one catch
/// type, and conditionally branches to the matching catch destination or to
/// the next comparison block (the default destination after the last one).
void ItaniumEHLowering::lowerDispatch(
    cir::EhDispatchOp dispatch, mlir::Value exnPtr, mlir::Value typeId,
    SmallVectorImpl<mlir::Operation *> &deadOps) {
  mlir::Location dispLoc = dispatch.getLoc();
  mlir::Block *defaultDest = dispatch.getDefaultDestination();
  mlir::ArrayAttr catchTypes = dispatch.getCatchTypesAttr();
  mlir::SuccessorRange catchDests = dispatch.getCatchDestinations();
  mlir::Block *dispatchBlock = dispatch->getBlock();

  // Build the comparison chain in new blocks inserted after the dispatch's
  // block. The dispatch itself is replaced with a branch to the first
  // comparison block and scheduled for deferred removal.
  if (!catchTypes || catchTypes.empty()) {
    // No typed catches: replace dispatch with a direct branch.
    builder.setInsertionPoint(dispatch);
    cir::BrOp::create(builder, dispLoc, defaultDest,
                      mlir::ValueRange{exnPtr, typeId});
  } else {
    unsigned numCatches = catchTypes.size();

    // Create and populate comparison blocks in reverse order so that each
    // block's false destination (the next comparison block, or defaultDest
    // for the last one) is already available. Each createBlock inserts
    // before the previous one, so the blocks end up in forward order.
    mlir::Block *insertBefore = dispatchBlock->getNextNode();
    mlir::Block *falseDest = defaultDest;
    mlir::Block *firstCmpBlock = nullptr;
    for (int i = numCatches - 1; i >= 0; --i) {
      // Each comparison block carries the (ptr, u32) pair as arguments.
      auto *cmpBlock = builder.createBlock(insertBefore, {voidPtrType, u32Type},
                                           {dispLoc, dispLoc});

      mlir::Value cmpExnPtr = cmpBlock->getArgument(0);
      mlir::Value cmpTypeId = cmpBlock->getArgument(1);

      // cir.eh.typeid materializes the type id of this catch's RTTI global.
      auto globalView = mlir::cast<cir::GlobalViewAttr>(catchTypes[i]);
      auto ehTypeIdOp =
          cir::EhTypeIdOp::create(builder, dispLoc, globalView.getSymbol());
      auto cmpOp = cir::CmpOp::create(builder, dispLoc, cir::CmpOpKind::eq,
                                      cmpTypeId, ehTypeIdOp.getTypeId());

      // Both destinations receive the (ptr, id) pair forwarded unchanged.
      cir::BrCondOp::create(builder, dispLoc, cmpOp, catchDests[i], falseDest,
                            mlir::ValueRange{cmpExnPtr, cmpTypeId},
                            mlir::ValueRange{cmpExnPtr, cmpTypeId});

      insertBefore = cmpBlock;
      falseDest = cmpBlock;
      firstCmpBlock = cmpBlock;
    }

    // Replace the dispatch with a branch to the first comparison block.
    builder.setInsertionPoint(dispatch);
    cir::BrOp::create(builder, dispLoc, firstCmpBlock,
                      mlir::ValueRange{exnPtr, typeId});
  }

  // Schedule the dispatch for deferred removal. We cannot erase it now because
  // a sibling initiate that shares this dispatch may still need to read its
  // catch types.
  deadOps.push_back(dispatch);
}
587
588mlir::FailureOr<cir::FuncOp>
589ItaniumEHLowering::resolveCatchCopyThunk(cir::ConstructCatchParamOp op) {
590 mlir::FlatSymbolRefAttr thunkRef = op.getCopyFnAttr();
591 mlir::StringAttr thunkName = thunkRef.getAttr();
592 auto cached = catchCopyThunks.find(thunkName);
593 if (cached != catchCopyThunks.end())
594 return cached->second;
595
596 cir::FuncOp thunk = mod.lookupSymbol<cir::FuncOp>(thunkRef);
597 if (!thunk)
598 return op.emitError("could not resolve catch-copy thunk symbol");
599 assert(thunk->hasAttr(cir::CIRDialect::getCatchCopyThunkAttrName()) &&
600 "verifier should have rejected non-thunk catch-copy reference");
601 if (thunk.isDeclaration())
602 return op.emitError("catch-copy thunk has no body to inline");
603
604 mlir::Region &thunkRegion = thunk.getRegion();
605 if (!llvm::hasSingleElement(thunkRegion))
606 return op.emitError("multi-block catch-copy thunks are NYI");
607
608 mlir::Block &thunkEntry = thunkRegion.front();
609 assert(thunkEntry.getNumArguments() == 2 &&
610 "catch-copy thunk must have exactly two parameters");
611 if (!mlir::isa<cir::ReturnOp>(thunkEntry.getTerminator()))
612 return op.emitError("catch-copy thunk must end in cir.return");
613
614 catchCopyThunks[thunkName] = thunk;
615 return thunk;
616}
617
/// Lower a cir.construct_catch_param into the Itanium-specific sequence
/// that runs before `__cxa_begin_catch` to bind the catch parameter to the
/// in-flight exception.
///
/// The generated sequence:
///   1. calls __cxa_get_exception_ptr on the landing-pad exception pointer
///      to obtain the (adjusted) exception object,
///   2. inlines the body of the catch-copy thunk to copy-construct the
///      parameter from the exception object, and
///   3. redirects any potentially-throwing call cloned from the thunk to a
///      terminate landing pad, since an exception escaping the copy must
///      end in std::terminate.
mlir::LogicalResult
ItaniumEHLowering::lowerConstructCatchParam(cir::ConstructCatchParamOp op,
                                            mlir::Value exnPtr) {
  // Trivial kinds are handled by lowerInitCatchParam instead.
  if (op.getKind() != cir::InitCatchKind::NonTrivialCopy)
    return op.emitError(
        "ConstructCatchParam: only non_trivial_copy is supported");

  mlir::Location loc = op.getLoc();
  ensureRuntimeDecls(loc);
  ensureClangCallTerminate(loc);

  mlir::Value paramAddr = op.getParamAddr();
  cir::PointerType paramAddrType =
      mlir::cast<cir::PointerType>(paramAddr.getType());

  // Call __cxa_get_exception_ptr to get the in-flight exception.
  builder.setInsertionPoint(op);
  cir::CallOp getExnCall = cir::CallOp::create(
      builder, loc, mlir::FlatSymbolRefAttr::get(getExceptionPtrFunc),
      u8PtrType, mlir::ValueRange{exnPtr});
  getExnCall.setNothrowAttr(builder.getUnitAttr());
  // Bitcast the !u8 result to the catch parameter's pointer type.
  mlir::Value adjusted =
      cir::CastOp::create(builder, loc, paramAddrType, cir::CastKind::bitcast,
                          getExnCall.getResult());

  // Get the thunk function definition.
  mlir::FailureOr<cir::FuncOp> thunkOr = resolveCatchCopyThunk(op);
  if (mlir::failed(thunkOr))
    return mlir::failure();
  cir::FuncOp thunk = *thunkOr;

  // This is also verified by resolveCatchCopyThunk, but the loop below is
  // where the constraint is required so let's assert it again here.
  assert(llvm::hasSingleElement(thunk.getRegion()) &&
         "multi-block catch-copy thunks are NYI");

  // Clone the thunk function to perform the copy. Argument 0 is mapped to
  // the destination (the catch parameter) and argument 1 to the source (the
  // adjusted exception object).
  mlir::Block &thunkEntry = thunk.getRegion().front();
  mlir::IRMapping mapping;
  mapping.map(thunkEntry.getArgument(0), paramAddr);
  mapping.map(thunkEntry.getArgument(1), adjusted);
  llvm::SmallVector<cir::CallOp> throwingCalls;
  for (mlir::Operation &thunkOp : thunkEntry.without_terminator()) {
    mlir::Operation *cloned = builder.clone(thunkOp, mapping);
    // Track cloned calls that may throw; they must unwind to terminate.
    if (cir::CallOp callOp = mlir::dyn_cast<cir::CallOp>(cloned))
      if (!callOp.getNothrow())
        throwingCalls.push_back(callOp);
  }
  op.erase();

  if (throwingCalls.empty())
    return mlir::success();

  // All calls in the copy (which is usually just a single call) need to
  // unwind to a terminate block if it throws an exception.
  mlir::IRRewriter rewriter(builder);
  mlir::Block *terminateBlock = nullptr;
  for (cir::CallOp call : throwingCalls) {
    if (!terminateBlock)
      terminateBlock = buildTerminateBlock(call->getParentOfType<cir::FuncOp>(),
                                           call.getLoc());
    cir::replaceCallWithTryCall(call, terminateBlock, call.getLoc(), rewriter);
  }
  return mlir::success();
}
686
687/// Lower a cir.init_catch_param into the Itanium-specific sequence that
688/// materializes the catch parameter's local variable from the exception
689/// pointer returned by __cxa_begin_catch. The shape of the lowering
690/// depends on the init catch kind:
691///
692/// - Reference: the begin_catch result is
693/// the pointer value itself, so just bitcast and store it into the alloca
694/// except if it reference of pointer of record.
695/// - Pointer: the begin_catch result is
696/// the pointer value itself, so just bitcast and store it into the
697/// alloca.
698/// - Scalar (any other by-value catch): treat the begin_catch result as a
699/// pointer to the value, load it, and store it into the alloca.
700/// - Objc: Handle pointer representation with ObjCLifetime.
701/// - TrivialCopy: copy the exception
702/// object's bytes into the alloca via cir.copy.
703/// - NonTrivialCopy: the construction was already performed by the
704/// companion `cir.construct_catch_param` before `cir.begin_catch`, so
705/// this lowering is a no-op.
706///
707void ItaniumEHLowering::lowerInitCatchParam(cir::InitCatchParamOp op) {
708 builder.setInsertionPoint(op);
709 mlir::Location loc = op.getLoc();
710 mlir::Value exnPtr = op.getExnPtr();
711 mlir::Value paramAddr = op.getParamAddr();
712 auto paramAddrType = mlir::cast<cir::PointerType>(paramAddr.getType());
713 mlir::Type elementType = paramAddrType.getPointee();
714 cir::InitCatchKind kind = op.getKind();
715
716 switch (kind) {
717 case InitCatchKind::Reference: {
718 // We have no way to tell the personality function that we're
719 // catching by reference, so if we're catching a pointer,
720 // __cxa_begin_catch will actually return that pointer by value.
721 if (const auto ref = mlir::dyn_cast<cir::PointerType>(elementType)) {
722 // When catching by reference, generally we should just ignore
723 // this by-value pointer and use the exception object instead.
724 if (auto ptr = mlir::dyn_cast<cir::PointerType>(ref.getPointee()))
725 if (!mlir::isa<cir::RecordType>(ptr.getPointee()))
726 llvm_unreachable(
727 "InitCatchParam: reference of pointer or non-record is NYI");
728 }
729
730 mlir::Value casted = cir::CastOp::create(builder, loc, elementType,
731 cir::CastKind::bitcast, exnPtr);
732 cir::StoreOp::create(builder, loc, casted, paramAddr, {}, {}, {}, {});
733 break;
734 }
735 case InitCatchKind::TrivialCopy: {
736 mlir::Value srcPtr = cir::CastOp::create(builder, loc, paramAddrType,
737 cir::CastKind::bitcast, exnPtr);
738 cir::CopyOp::create(builder, loc, paramAddr, srcPtr, {}, {});
739 break;
740 }
741 case InitCatchKind::NonTrivialCopy:
742 // The non-trivial copy was performed by the matching
743 // cir.construct_catch_param before cir.begin_catch.
744 break;
745 case InitCatchKind::Scalar: {
746 // Scalar by-value catch (integer, float, complex, etc.). The begin_catch
747 // result points into the exception object; load the value through a
748 // typed pointer and store it into the alloca.
749 mlir::Value srcPtr = cir::CastOp::create(builder, loc, paramAddrType,
750 cir::CastKind::bitcast, exnPtr);
751 auto loadOp = cir::LoadOp::create(builder, loc, elementType, srcPtr);
752 cir::StoreOp::create(builder, loc, loadOp.getResult(), paramAddr, {}, {},
753 {}, {});
754 break;
755 }
756 case InitCatchKind::Pointer: {
757 mlir::Value casted = cir::CastOp::create(builder, loc, elementType,
758 cir::CastKind::bitcast, exnPtr);
759 cir::StoreOp::create(builder, loc, casted, paramAddr, {}, {}, {}, {});
760 break;
761 }
762 case InitCatchKind::Objc:
763 llvm_unreachable("InitCatchParam: ObjCLifetime is NYI");
764 break;
765 }
766
767 op.erase();
768}
769
770//===----------------------------------------------------------------------===//
771// The Pass
772//===----------------------------------------------------------------------===//
773
/// Pass wrapper that selects and drives the ABI-specific EH lowering over a
/// CIR module (see runOnOperation below).
struct CIREHABILoweringPass
    : public impl::CIREHABILoweringBase<CIREHABILoweringPass> {
  CIREHABILoweringPass() = default;
  void runOnOperation() override;
};
779
780/// Erase all catch-init thunks after the EHABI lowering. CIRGen emits a thunk
781/// for every `cir.construct_catch_param` op, but those uses should all have
782/// been replaced during the lowering.
783static void eraseCatchCopyThunks(mlir::ModuleOp mod) {
784 llvm::StringRef catchHelperAttr =
785 cir::CIRDialect::getCatchCopyThunkAttrName();
786 for (cir::FuncOp f : llvm::make_early_inc_range(mod.getOps<cir::FuncOp>())) {
787 if (!f->hasAttr(catchHelperAttr))
788 continue;
789 // This is an expensive check, so we need to rely on the implementation
790 // to have done the right thing.
791 assert(mlir::SymbolTable::symbolKnownUseEmpty(f, mod) &&
792 "catch-init helper has remaining users");
793 f.erase();
794 }
795}
796
797void CIREHABILoweringPass::runOnOperation() {
798 auto mod = mlir::cast<mlir::ModuleOp>(getOperation());
799
800 // The target triple is attached to the module as the "cir.triple"
801 // attribute. If it is absent (e.g. a CIR module parsed from text without a
802 // triple) we cannot determine the ABI and must skip the pass.
803 auto tripleAttr = mlir::dyn_cast_if_present<mlir::StringAttr>(
804 mod->getAttr(cir::CIRDialect::getTripleAttrName()));
805 if (!tripleAttr) {
806 mod.emitError("Module has no target triple");
807 return;
808 }
809
810 // Select the ABI-specific lowering handler from the triple. The Microsoft
811 // C++ ABI targets a Windows MSVC environment; everything else uses Itanium.
812 // Extend this when Microsoft ABI lowering is added.
813 llvm::Triple triple(tripleAttr.getValue());
814 std::unique_ptr<EHABILowering> lowering;
815 if (triple.isWindowsMSVCEnvironment()) {
816 mod.emitError(
817 "EH ABI lowering is not yet implemented for the Microsoft ABI");
818 return signalPassFailure();
819 } else {
820 lowering = std::make_unique<ItaniumEHLowering>(mod);
821 }
822
823 if (mlir::failed(lowering->run()))
824 return signalPassFailure();
825
826 // Sweep away any the thunk functions. They've been inlined to all users now.
827 eraseCatchCopyThunks(mod);
828}
829
830} // namespace
831
832std::unique_ptr<Pass> mlir::createCIREHABILoweringPass() {
833 return std::make_unique<CIREHABILoweringPass>();
834}
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 float __ockl_bool s
mlir::Block * replaceCallWithTryCall(cir::CallOp callOp, mlir::Block *unwindDest, mlir::Location loc, mlir::RewriterBase &rewriter)
Replace a cir::CallOp with a cir::TryCallOp whose unwind destination is unwindDest.
ASTEdit insertBefore(RangeSelector S, TextGenerator Replacement)
Inserts Replacement before S, leaving the source selected by \S unchanged.
Stencil run(MatchConsumer< std::string > C)
Wraps a MatchConsumer in a Stencil, so that it can be used in a Stencil.
Definition Stencil.cpp:489
RangeSelector name(std::string ID)
Given a node with a "name", (like NamedDecl, DeclRefExpr, CxxCtorInitializer, and TypeLoc) selects th...
std::unique_ptr< Pass > createCIREHABILoweringPass()
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)