//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope. This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
//
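// For example, given
//
//   void f(bool b) {
//     S s;            // ~S() is a cleanup for f's scope
//     if (b) throw 0; // exceptional exit: the cleanup runs before unwinding
//   }                 // normal exit: the cleanup runs before returning
//
// the destructor of `s` must run on both the normal and the exceptional
// paths out of the scope.
//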
//===----------------------------------------------------------------------===//

#include "CIRGenCleanup.h"
#include "CIRGenFunction.h"

#include "clang/AST/RecursiveASTVisitor.h"

using namespace clang;
using namespace clang::CIRGen;

namespace {
/// Return true if the expression tree contains an AbstractConditionalOperator
/// (ternary ?:), which is the only construct whose CIR codegen calls
/// ConditionalEvaluation::beginEvaluation() and thus causes cleanups to be
/// deferred via pushFullExprCleanup. Logical &&/|| do NOT call
/// beginEvaluation(); their branch-local cleanups are handled by LexicalScope.
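/// For example, `T t = cond ? makeA() : makeB();` defers its temporaries'
/// destructor cleanups, while `use(a && b.valid());` does not (illustrative
/// names, not taken from this file).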
class ConditionalEvaluationFinder
    : public RecursiveASTVisitor<ConditionalEvaluationFinder> {
  bool foundConditional = false;

public:
  bool found() const { return foundConditional; }

  bool VisitAbstractConditionalOperator(AbstractConditionalOperator *) {
    foundConditional = true;
    return false;
  }

  // Don't cross evaluation-context boundaries.
  bool TraverseLambdaExpr(LambdaExpr *) { return true; }
  bool TraverseBlockExpr(BlockExpr *) { return true; }
  bool TraverseStmtExpr(StmtExpr *) { return true; }
};
} // namespace

//===----------------------------------------------------------------------===//
// CIRGenFunction cleanup related
//===----------------------------------------------------------------------===//

/// Emits all the code to cause the given temporary to be cleaned up.
void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
                                      QualType tempType, Address ptr) {
  pushDestroy(QualType::DK_cxx_destructor, ptr, tempType);
}

Address CIRGenFunction::createCleanupActiveFlag() {
  assert(isInConditionalBranch());
  mlir::Location loc = builder.getUnknownLoc();

  // Place the alloca in the function entry block so it dominates everything,
  // including both regions of any enclosing cir.cleanup.scope. We can't rely
  // on the default curLexScope path because we may be inside a ternary branch
  // whose LexicalScope would capture the alloca.
  Address active = createTempAllocaWithoutCast(
      builder.getBoolTy(), CharUnits::One(), loc, "cleanup.cond",
      /*arraySize=*/nullptr,
      builder.getBestAllocaInsertPoint(getCurFunctionEntryBlock()));

  // Initialize to false before the outermost conditional.
  {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
    builder.createFlagStore(loc, false, active.getPointer());
  }

  // Set to true at the current location (inside the conditional branch).
  builder.createFlagStore(loc, true, active.getPointer());

  return active;
}
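// Illustrative shape of the generated CIR (simplified; exact syntax, types,
// and attributes omitted):
//
//   %flag = cir.alloca !cir.bool ... ["cleanup.cond"]  // function entry
//   cir.store %false, %flag   // before the outermost conditional
//   ...
//   cir.store %true, %flag    // in the branch that reaches the cleanup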

void CIRGenFunction::initFullExprCleanup() {
  initFullExprCleanupWithFlag(createCleanupActiveFlag());
}

void CIRGenFunction::initFullExprCleanupWithFlag(Address activeFlag) {
  // Set that as the active flag in the cleanup.
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*ehStack.begin());
  assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
  cleanup.setActiveFlag(activeFlag);

  cleanup.setTestFlagInNormalCleanup(cleanup.isNormalCleanup());
  cleanup.setTestFlagInEHCleanup(cleanup.isEHCleanup());
}
101
103 const Expr *subExpr)
104 : cgf(cgf), cleanups(cgf), scope(nullptr),
105 deferredCleanupStackSize(cgf.deferredConditionalCleanupStack.size()) {
106
107 assert(subExpr && "ExprWithCleanups always has a sub-expression");
108 ConditionalEvaluationFinder finder;
109 finder.TraverseStmt(const_cast<Expr *>(subExpr));
110 if (finder.found()) {
111 mlir::Location loc = cgf.builder.getUnknownLoc();
112 cir::CleanupKind cleanupKind = cgf.getLangOpts().Exceptions
113 ? cir::CleanupKind::All
114 : cir::CleanupKind::Normal;
115 scope = cir::CleanupScopeOp::create(
116 cgf.builder, loc, cleanupKind,
117 /*bodyBuilder=*/
118 [&](mlir::OpBuilder &b, mlir::Location loc) {},
119 /*cleanupBuilder=*/
120 [&](mlir::OpBuilder &b, mlir::Location loc) {});
121 cgf.builder.setInsertionPointToEnd(&scope.getBodyRegion().front());
122 }
123}
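// For a full expression containing a ternary, e.g. `T t = b ? f() : g();`,
// this wraps the expression roughly as (sketch, not exact CIR syntax):
//
//   cir.cleanup.scope {
//     ... the full expression is emitted here ...
//     cir.yield
//   } cleanup {
//     ... deferred conditional destructor calls ...
//     cir.yield
//   }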

void CIRGenFunction::FullExprCleanupScope::exit(
    ArrayRef<mlir::Value *> valuesToReload) {
  assert(!exited && "FullExprCleanupScope::exit called twice");
  exited = true;

  size_t oldSize = deferredCleanupStackSize;
  bool hasDeferredCleanups =
      cgf.deferredConditionalCleanupStack.size() > oldSize;

  if (!scope) {
    cgf.deferredConditionalCleanupStack.truncate(oldSize);
    cleanups.forceCleanup(valuesToReload);
    return;
  }

  // Spill any values that callers need after the scope is closed.
  SmallVector<Address> tempAllocas;
  for (mlir::Value *valPtr : valuesToReload) {
    mlir::Value val = *valPtr;
    if (!val) {
      tempAllocas.push_back(Address::invalid());
      continue;
    }
    Address temp = cgf.createDefaultAlignTempAlloca(val.getType(), val.getLoc(),
                                                    "tmp.exprcleanup");
    tempAllocas.push_back(temp);
    cgf.builder.createStore(val.getLoc(), val, temp);
  }

  // Pop any EH and lifetime-extended cleanups that were pushed during
  // the expression (e.g. temporary destructors).
  cleanups.forceCleanup();

  // Make sure the cleanup scope body region has a terminator.
  {
    mlir::OpBuilder::InsertionGuard guard(cgf.builder);
    mlir::Block &lastBodyBlock = scope.getBodyRegion().back();
    cgf.builder.setInsertionPointToEnd(&lastBodyBlock);
    if (lastBodyBlock.empty() ||
        !lastBodyBlock.back().hasTrait<mlir::OpTrait::IsTerminator>())
      cgf.builder.createYield(scope.getLoc());
  }

  // Emit any deferred cleanups.
  {
    mlir::OpBuilder::InsertionGuard guard(cgf.builder);
    mlir::Block &cleanupBlock = scope.getCleanupRegion().front();
    cgf.builder.setInsertionPointToEnd(&cleanupBlock);

    if (hasDeferredCleanups) {
      for (const PendingCleanupEntry &entry : llvm::reverse(llvm::make_range(
               cgf.deferredConditionalCleanupStack.begin() + oldSize,
               cgf.deferredConditionalCleanupStack.end()))) {
        if (entry.activeFlag.isValid()) {
          mlir::Value flag =
              cgf.builder.createLoad(scope.getLoc(), entry.activeFlag);
          cir::IfOp::create(
              cgf.builder, scope.getLoc(), flag, /*withElseRegion=*/false,
              [&](mlir::OpBuilder &b, mlir::Location loc) {
                cgf.emitDestroy(entry.addr, entry.type, entry.destroyer);
                cgf.builder.createYield(loc);
              });
        } else {
          cgf.emitDestroy(entry.addr, entry.type, entry.destroyer);
        }
      }
    }
    cgf.builder.createYield(scope.getLoc());
  }

  cgf.deferredConditionalCleanupStack.truncate(oldSize);
  cgf.builder.setInsertionPointAfter(scope);

  // Reload spilled values now that the builder is after the closed scope.
  for (auto [addr, valPtr] : llvm::zip(tempAllocas, valuesToReload)) {
    if (!addr.isValid())
      continue;
    *valPtr = cgf.builder.createLoad(valPtr->getLoc(), addr);
  }
}

//===----------------------------------------------------------------------===//
// EHScopeStack
//===----------------------------------------------------------------------===//

void EHScopeStack::Cleanup::anchor() {}

EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  stable_iterator si = getInnermostNormalCleanup();
  stable_iterator se = stable_end();
  while (si != se) {
    EHCleanupScope &cleanup = llvm::cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive())
      return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}

/// Push an entry of the given size onto this protected-scope stack.
char *EHScopeStack::allocate(size_t size) {
  size = llvm::alignTo(size, ScopeStackAlignment);
  if (!startOfBuffer) {
    unsigned capacity = llvm::PowerOf2Ceil(std::max<size_t>(size, 1024ul));
    startOfBuffer = std::make_unique<char[]>(capacity);
    startOfData = endOfBuffer = startOfBuffer.get() + capacity;
  } else if (static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
    unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
    unsigned usedCapacity =
        currentCapacity - (startOfData - startOfBuffer.get());
    unsigned requiredCapacity = usedCapacity + size;
    // We know from the 'else if' condition that requiredCapacity is greater
    // than currentCapacity.
    unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);

    std::unique_ptr<char[]> newStartOfBuffer =
        std::make_unique<char[]>(newCapacity);
    char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
    char *newStartOfData = newEndOfBuffer - usedCapacity;
    memcpy(newStartOfData, startOfData, usedCapacity);
    startOfBuffer.swap(newStartOfBuffer);
    endOfBuffer = newEndOfBuffer;
    startOfData = newStartOfData;
  }
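  // Worked example of the growth path above: with a 1024-byte buffer and
  // startOfData 64 bytes above startOfBuffer (960 bytes used), a 128-byte
  // request computes requiredCapacity = 960 + 128 = 1088, so the buffer
  // grows to PowerOf2Ceil(1088) = 2048 and the used bytes are copied to the
  // top of the new allocation.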

  assert(startOfBuffer.get() + size <= startOfData);
  startOfData -= size;
  return startOfData;
}

void EHScopeStack::deallocate(size_t size) {
  startOfData += llvm::alignTo(size, ScopeStackAlignment);
}

void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
  char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
  bool isNormalCleanup = kind & NormalCleanup;
  bool isEHCleanup = kind & EHCleanup;
  bool isLifetimeMarker = kind & LifetimeMarker;
  bool skipCleanupScope = false;

  cir::CleanupKind cleanupKind = cir::CleanupKind::All;
  if (isEHCleanup && cgf->getLangOpts().Exceptions) {
    cleanupKind =
        isNormalCleanup ? cir::CleanupKind::All : cir::CleanupKind::EH;
  } else {
    // Exceptions are disabled (or no EH flag was requested). Drop the EH
    // flag so the scope entry stays consistent with the op's cleanup kind.
    isEHCleanup = false;
    if (isNormalCleanup)
      cleanupKind = cir::CleanupKind::Normal;
    else
      skipCleanupScope = true;
  }
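  // In summary (restating the branches above): with exceptions enabled,
  // Normal+EH maps to All and EH-only to EH; with exceptions disabled,
  // Normal maps to Normal and an EH-only cleanup needs no cir.cleanup.scope
  // at all.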

  cir::CleanupScopeOp cleanupScope = nullptr;
  if (!skipCleanupScope) {
    CIRGenBuilderTy &builder = cgf->getBuilder();
    mlir::Location loc = builder.getUnknownLoc();
    cleanupScope = cir::CleanupScopeOp::create(
        builder, loc, cleanupKind,
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // Terminators will be handled in popCleanup.
        },
        /*cleanupBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // Terminators will be handled after emitting the cleanup.
        });

    builder.setInsertionPointToEnd(&cleanupScope.getBodyRegion().back());
  }

  // Per C++ [except.terminate], it is implementation-defined whether none,
  // some, or all cleanups are called before std::terminate. Thus, when
  // terminate is the current EH scope, we may skip adding any EH cleanup
  // scopes.
  if (innermostEHScope != stable_end() &&
      find(innermostEHScope)->getKind() == EHScope::Terminate)
    isEHCleanup = false;

  EHCleanupScope *scope = new (buffer)
      EHCleanupScope(isNormalCleanup, isEHCleanup, size, cleanupScope,
                     innermostNormalCleanup, innermostEHScope);

  if (isNormalCleanup)
    innermostNormalCleanup = stable_begin();

  if (isEHCleanup)
    innermostEHScope = stable_begin();

  if (isLifetimeMarker)
    cgf->cgm.errorNYI("push lifetime marker cleanup");

  // With Windows -EHa, invoke llvm.seh.scope.begin() for EHCleanup.
  if (cgf->getLangOpts().EHAsynch && isEHCleanup && !isLifetimeMarker &&
      cgf->getTarget().getCXXABI().isMicrosoft())
    cgf->cgm.errorNYI("push seh cleanup");

  return scope->getCleanupBuffer();
}

void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when not empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
  innermostNormalCleanup = cleanup.getEnclosingNormalCleanup();
  innermostEHScope = cleanup.getEnclosingEHScope();
  deallocate(cleanup.getAllocatedSize());

  cir::CleanupScopeOp cleanupScope = cleanup.getCleanupScopeOp();
  if (cleanupScope) {
    auto *block = &cleanupScope.getBodyRegion().back();
    if (!block->mightHaveTerminator()) {
      mlir::OpBuilder::InsertionGuard guard(cgf->getBuilder());
      cgf->getBuilder().setInsertionPointToEnd(block);
      cir::YieldOp::create(cgf->getBuilder(),
                           cgf->getBuilder().getUnknownLoc());
    }
    cgf->getBuilder().setInsertionPointAfter(cleanupScope);
  }

  // Destroy the cleanup.
  cleanup.destroy();
}

bool EHScopeStack::requiresCatchOrCleanup() const {
  for (stable_iterator si = getInnermostEHScope(); si != stable_end();) {
    if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si))) {
      if (cleanup->isLifetimeMarker()) {
        // Skip lifetime markers and continue from the enclosing EH scope.
        si = cleanup->getEnclosingEHScope();
        continue;
      }
    }
    return true;
  }
  return false;
}

/// The given cleanup block is being deactivated. Configure a cleanup variable
/// if necessary.
static void setupCleanupBlockDeactivation(CIRGenFunction &cgf,
                                          EHScopeStack::stable_iterator c,
                                          mlir::Operation *dominatingIP) {
  EHCleanupScope &scope = cast<EHCleanupScope>(*cgf.ehStack.find(c));

  assert((scope.isNormalCleanup() || scope.isEHCleanup()) &&
         "cleanup block is neither normal nor EH?");

  scope.setTestFlagInNormalCleanup(scope.isNormalCleanup());
  scope.setTestFlagInEHCleanup(scope.isEHCleanup());

  CIRGenBuilderTy &builder = cgf.getBuilder();

  // If the cleanup's active flag doesn't exist yet, create it and set its
  // initial value to `true`. If we are inside a conditional branch, the value
  // must be initialized before the conditional branch begins.
  Address var = scope.getActiveFlag();
  if (!var.isValid()) {
    mlir::Location loc = builder.getUnknownLoc();

    var = cgf.createDefaultAlignTempAlloca(builder.getBoolTy(),
                                           loc, "cleanup.isactive");
    scope.setActiveFlag(var);

    assert(dominatingIP && "no existing variable and no dominating IP!");

    if (cgf.isInConditionalBranch()) {
      mlir::Value val = builder.getBool(true, loc);
      cgf.setBeforeOutermostConditional(val, var);
    } else {
      mlir::OpBuilder::InsertionGuard guard(builder);
      builder.setInsertionPoint(dominatingIP);
      builder.createFlagStore(loc, true, var.getPointer());
    }
  }

  // The code above sets the `isActive` flag to `true` as its initial state
  // at the point where the variable is created. The code below sets it to
  // `false` at the point where the cleanup is deactivated.
  mlir::Location loc = builder.getUnknownLoc();
  builder.createFlagStore(loc, false, var.getPointer());
}

/// Deactivate a cleanup that was created in an active state.
void CIRGenFunction::deactivateCleanupBlock(EHScopeStack::stable_iterator c,
                                            mlir::Operation *dominatingIP) {
  assert(c != ehStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.find(c));
  assert(scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it, but do so only if it belongs
  // to the current RunCleanupsScope.
  if (c == ehStack.stable_begin() &&
      currentCleanupStackDepth.strictlyEncloses(c)) {
    popCleanupBlock(/*forDeactivation=*/true);
    return;
  }

  // Otherwise, follow the general case.
  setupCleanupBlockDeactivation(*this, c, dominatingIP);

  scope.setActive(false);
}

static void emitCleanup(CIRGenFunction &cgf, cir::CleanupScopeOp cleanupScope,
                        EHScopeStack::Cleanup *cleanup,
                        EHScopeStack::Cleanup::Flags flags,
                        Address activeFlag) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Block &block = cleanupScope.getCleanupRegion().back();

  mlir::OpBuilder::InsertionGuard guard(builder);
  builder.setInsertionPointToStart(&block);

  // Ask the cleanup to emit itself.
  assert(cgf.haveInsertPoint() && "expected insertion point");

  if (activeFlag.isValid()) {
    mlir::Location loc = cleanupScope.getLoc();
    mlir::Value isActive = builder.createFlagLoad(loc, activeFlag.getPointer());
    cir::IfOp::create(builder, loc, isActive,
                      /*withElseRegion=*/false,
                      /*thenBuilder=*/
                      [&](mlir::OpBuilder &, mlir::Location) {
                        cleanup->emit(cgf, flags);
                        assert(cgf.haveInsertPoint() &&
                               "cleanup ended with no insertion point?");
                        builder.createYield(loc);
                      });
  } else {
    cleanup->emit(cgf, flags);
    assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
  }

  mlir::Block &cleanupRegionLastBlock = cleanupScope.getCleanupRegion().back();
  if (cleanupRegionLastBlock.empty() ||
      !cleanupRegionLastBlock.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
    mlir::OpBuilder::InsertionGuard guardCase(builder);
    builder.setInsertionPointToEnd(&cleanupRegionLastBlock);
    builder.createYield(cleanupScope.getLoc());
  }
}
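// When an active flag is present, the cleanup region ends up roughly as
// (simplified sketch; the real CIR includes types and attributes):
//
//   %isactive = cir.load %flag
//   cir.if %isactive {
//     ... cleanup body (e.g. a destructor call) ...
//   }
//   cir.yield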

/// Check whether a cleanup scope body contains any non-yield exits (such as
/// return or goto) that branch through the cleanup and therefore require the
/// normal cleanup to be executed even when the cleanup has been deactivated.
static bool bodyHasBranchThroughExits(mlir::Region &bodyRegion) {
  return bodyRegion
      .walk([&](mlir::Operation *op) {
        if (mlir::isa<cir::ReturnOp, cir::GotoOp>(op))
          return mlir::WalkResult::interrupt();
        return mlir::WalkResult::advance();
      })
      .wasInterrupted();
}

/// Pop a cleanup block from the stack.
///
/// \param forDeactivation - When true, this indicates that the cleanup block
/// is being popped because it was deactivated while at the top of the stack.
void CIRGenFunction::popCleanupBlock(bool forDeactivation) {
  assert(!ehStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
  EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.begin());

  cir::CleanupScopeOp cleanupScope = scope.getCleanupScopeOp();
  assert(cleanupScope && "CleanupScopeOp is nullptr");

  bool requiresNormalCleanup = scope.isNormalCleanup();
  bool requiresEHCleanup = scope.isEHCleanup();

  // When we're popping a cleanup to deactivate it, we need to know if anything
  // in the cleanup scope body region branches through the cleanup handler
  // before the entire cleanup scope body has executed. If the cleanup scope
  // body falls through, we don't want to emit normal cleanup code. However,
  // if the cleanup body region contains early exits (return or goto), we do
  // need to execute the normal cleanup when the early exit is taken. To handle
  // that case, we guard the cleanup with an "active" flag so that it executes
  // conditionally and set the flag to false when the cleanup body falls
  // through. Classic codegen tracks this state with "hasBranches" and
  // "getFixupDepth" on the cleanup scope, but because CIR uses structured
  // control flow, we need to check for early exits and insert the active
  // flag handling here. Note that when a cleanup is deactivated while not at
  // the top of the stack, the active flag gets created in
  // setupCleanupBlockDeactivation.
  if (forDeactivation && requiresNormalCleanup) {
    if (bodyHasBranchThroughExits(cleanupScope.getBodyRegion())) {
      // The active flag shouldn't exist if the scope was at the top of the
      // stack when it was deactivated.
      assert(!scope.getActiveFlag().isValid() && "active flag already set");

      // Create the flag.
      mlir::Location loc = builder.getUnknownLoc();
      Address activeFlag = createTempAllocaWithoutCast(
          builder.getBoolTy(), CharUnits::One(), loc, "cleanup.isactive");

      // Initialize the flag to true before the cleanup scope (the point where
      // the cleanup becomes active).
      {
        mlir::OpBuilder::InsertionGuard guard(builder);
        builder.setInsertionPoint(cleanupScope);
        builder.createFlagStore(loc, true, activeFlag.getPointer());
      }

      // Set the flag to false at the end of the cleanup scope body region.
      assert(builder.getInsertionBlock() ==
                 &cleanupScope.getBodyRegion().back() &&
             "expected insertion point in cleanup body");
      builder.createFlagStore(loc, false, activeFlag.getPointer());

      scope.setActiveFlag(activeFlag);
      scope.setTestFlagInNormalCleanup(true);
    } else {
      // If the cleanup was pushed on the stack as normal+eh, downgrade it to
      // eh-only.
      if (requiresEHCleanup)
        cleanupScope.setCleanupKind(cir::CleanupKind::EH);
      requiresNormalCleanup = false;
    }
  }

  Address normalActiveFlag = scope.shouldTestFlagInNormalCleanup()
                                 ? scope.getActiveFlag()
                                 : Address::invalid();
  Address ehActiveFlag = scope.shouldTestFlagInEHCleanup()
                             ? scope.getActiveFlag()
                             : Address::invalid();

  // If we don't need the cleanup at all, we're done.
  if (!requiresNormalCleanup && !requiresEHCleanup) {
    // If we get here, the cleanup scope isn't needed. Rather than try to move
    // the contents of its body region out of the cleanup and erase it, we just
    // add a yield to the cleanup region to make it valid but no-op. It will be
    // erased during canonicalization.
    mlir::Block &cleanupBlock = cleanupScope.getCleanupRegion().back();
    if (!cleanupBlock.mightHaveTerminator()) {
      mlir::OpBuilder::InsertionGuard guard(builder);
      builder.setInsertionPointToEnd(&cleanupBlock);
      cir::YieldOp::create(builder, builder.getUnknownLoc());
    }
    ehStack.popCleanup();
    return;
  }

  // Copy the cleanup emission data out. This uses either a stack
  // array or malloc'd memory, depending on the size, which is
  // behavior that SmallVector would provide, if we could use it
  // here. Unfortunately, if you ask for a SmallVector<char>, the
  // alignment isn't sufficient.
  auto *cleanupSource = reinterpret_cast<char *>(scope.getCleanupBuffer());
  alignas(EHScopeStack::ScopeStackAlignment) char
      cleanupBufferStack[8 * sizeof(void *)];
  std::unique_ptr<char[]> cleanupBufferHeap;
  size_t cleanupSize = scope.getCleanupSize();
  EHScopeStack::Cleanup *cleanup = nullptr;

  // This is necessary because we are going to deallocate the cleanup
  // (in popCleanup) before we emit it.
  if (cleanupSize <= sizeof(cleanupBufferStack)) {
    memcpy(cleanupBufferStack, cleanupSource, cleanupSize);
    cleanup = reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferStack);
  } else {
    cleanupBufferHeap.reset(new char[cleanupSize]);
    memcpy(cleanupBufferHeap.get(), cleanupSource, cleanupSize);
    cleanup =
        reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferHeap.get());
  }


  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  // Determine the active flag for the cleanup handler.
  Address cleanupActiveFlag = normalActiveFlag.isValid() ? normalActiveFlag
                              : ehActiveFlag.isValid()   ? ehActiveFlag
                                                         : Address::invalid();

  // In CIR, the cleanup code is emitted into the cleanup region of the
  // cir.cleanup.scope op. There is no CFG threading needed; the FlattenCFG
  // pass handles lowering the structured cleanup scope.
  ehStack.popCleanup();
  scope.markEmitted();
  emitCleanup(*this, cleanupScope, cleanup, cleanupFlags, cleanupActiveFlag);
}

/// Pops cleanup blocks until the given savepoint is reached.
void CIRGenFunction::popCleanupBlocks(
    EHScopeStack::stable_iterator oldCleanupStackDepth,
    ArrayRef<mlir::Value *> valuesToReload) {
  // If the current stack depth is the same as the cleanup stack depth,
  // we won't be exiting any cleanup scopes, so we don't need to reload
  // any values.
  bool requiresCleanup = false;
  for (auto it = ehStack.begin(), ie = ehStack.find(oldCleanupStackDepth);
       it != ie; ++it) {
    if (isa<EHCleanupScope>(&*it)) {
      requiresCleanup = true;
      break;
    }
  }

  // If there are values that we need to keep live, spill them now before
  // we pop the cleanup blocks. These are passed as pointers to mlir::Value
  // because we're going to replace them with the reloaded value.
  SmallVector<Address> tempAllocas;
  if (requiresCleanup) {
    for (mlir::Value *valPtr : valuesToReload) {
      mlir::Value val = *valPtr;
      if (!val)
        continue;

      // TODO(cir): Check for static allocas.

      Address temp = createDefaultAlignTempAlloca(val.getType(), val.getLoc(),
                                                  "tmp.exprcleanup");
      tempAllocas.push_back(temp);
      builder.createStore(val.getLoc(), val, temp);
    }
  }

  // Pop cleanup blocks until we reach the base stack depth for the
  // current scope.
  while (ehStack.stable_begin() != oldCleanupStackDepth)
    popCleanupBlock();

  // Reload the values that we spilled, if necessary.
  if (requiresCleanup) {
    for (auto [addr, valPtr] : llvm::zip(tempAllocas, valuesToReload)) {
      mlir::Location loc = valPtr->getLoc();
      *valPtr = builder.createLoad(loc, addr);
    }
  }
}
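// Spill/reload illustration: for a full expression whose result is still
// needed after its cleanups run (e.g. the value of `f(T())` where ~T() is a
// pending cleanup), the result is stored to a "tmp.exprcleanup" alloca before
// the cleanup blocks are popped and loaded back afterwards, so the caller's
// mlir::Value stays valid past the cleanup code.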

/// Pops cleanup blocks until the given savepoint is reached, then adds the
/// cleanups from the given savepoint in the lifetime-extended cleanups stack.
void CIRGenFunction::popCleanupBlocks(
    EHScopeStack::stable_iterator oldCleanupStackDepth,
    size_t oldLifetimeExtendedSize, ArrayRef<mlir::Value *> valuesToReload) {
  popCleanupBlocks(oldCleanupStackDepth, valuesToReload);

  // Promote deferred lifetime-extended cleanups onto the EH scope stack.
  for (const PendingCleanupEntry &cleanup : llvm::make_range(
           lifetimeExtendedCleanupStack.begin() + oldLifetimeExtendedSize,
           lifetimeExtendedCleanupStack.end()))
    pushPendingCleanupToEHStack(cleanup);
  lifetimeExtendedCleanupStack.truncate(oldLifetimeExtendedSize);
}