clang 23.0.0git
CIRGenCleanup.cpp
Go to the documentation of this file.
1//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains code dealing with the IR generation for cleanups
10// and related information.
11//
12// A "cleanup" is a piece of code which needs to be executed whenever
13// control transfers out of a particular scope. This can be
14// conditionalized to occur only on exceptional control flow, only on
15// normal control flow, or both.
16//
17//===----------------------------------------------------------------------===//
18
19#include "CIRGenCleanup.h"
20#include "CIRGenFunction.h"
21
24
25using namespace clang;
26using namespace clang::CIRGen;
27
28namespace {
29/// Return true if the expression tree contains an AbstractConditionalOperator
30/// (ternary ?:), which is the only construct whose CIR codegen calls
31/// ConditionalEvaluation::beginEvaluation() and thus causes cleanups to be
32/// deferred via pushFullExprCleanup. Logical &&/|| do NOT call
33/// beginEvaluation(); their branch-local cleanups are handled by LexicalScope.
34class ConditionalEvaluationFinder
35 : public RecursiveASTVisitor<ConditionalEvaluationFinder> {
36 bool foundConditional = false;
37
38public:
39 bool found() const { return foundConditional; }
40
41 bool VisitAbstractConditionalOperator(AbstractConditionalOperator *) {
42 foundConditional = true;
43 return false;
44 }
45
46 // Don't cross evaluation-context boundaries.
47 bool TraverseLambdaExpr(LambdaExpr *) { return true; }
48 bool TraverseBlockExpr(BlockExpr *) { return true; }
49 bool TraverseStmtExpr(StmtExpr *) { return true; }
50};
51} // namespace
52
53//===----------------------------------------------------------------------===//
54// CIRGenFunction cleanup related
55//===----------------------------------------------------------------------===//
56
57/// Emits all the code to cause the given temporary to be cleaned up.
59 QualType tempType, Address ptr) {
61}
62
64 assert(isInConditionalBranch());
65 mlir::Location loc = builder.getUnknownLoc();
66
67 // Place the alloca in the function entry block so it dominates everything,
68 // including both regions of any enclosing cir.cleanup.scope. We can't rely
69 // on the default curLexScope path because we may be inside a ternary branch
70 // whose LexicalScope would capture the alloca.
72 builder.getBoolTy(), CharUnits::One(), loc, "cleanup.cond",
73 /*arraySize=*/nullptr,
74 builder.getBestAllocaInsertPoint(getCurFunctionEntryBlock()));
75
76 // Initialize to false before the outermost conditional.
77 {
78 mlir::OpBuilder::InsertionGuard guard(builder);
79 builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
80 builder.createFlagStore(loc, false, active.getPointer());
81 }
82
83 // Set to true at the current location (inside the conditional branch).
84 builder.createFlagStore(loc, true, active.getPointer());
85
86 return active;
87}
88
90 const Expr *subExpr)
91 : cgf(cgf), cleanups(cgf), scope(nullptr),
92 deferredCleanupStackSize(cgf.deferredConditionalCleanupStack.size()) {
93
94 assert(subExpr && "ExprWithCleanups always has a sub-expression");
95 ConditionalEvaluationFinder finder;
96 finder.TraverseStmt(const_cast<Expr *>(subExpr));
97 if (finder.found()) {
98 mlir::Location loc = cgf.builder.getUnknownLoc();
99 cir::CleanupKind cleanupKind = cgf.getLangOpts().Exceptions
100 ? cir::CleanupKind::All
101 : cir::CleanupKind::Normal;
102 scope = cir::CleanupScopeOp::create(
103 cgf.builder, loc, cleanupKind,
104 /*bodyBuilder=*/
105 [&](mlir::OpBuilder &b, mlir::Location loc) {},
106 /*cleanupBuilder=*/
107 [&](mlir::OpBuilder &b, mlir::Location loc) {});
108 cgf.builder.setInsertionPointToEnd(&scope.getBodyRegion().front());
109 }
110}
111
113 ArrayRef<mlir::Value *> valuesToReload) {
114 assert(!exited && "FullExprCleanupScope::exit called twice");
115 exited = true;
116
117 size_t oldSize = deferredCleanupStackSize;
118 bool hasDeferredCleanups =
119 cgf.deferredConditionalCleanupStack.size() > oldSize;
120
121 if (!scope) {
122 cgf.deferredConditionalCleanupStack.truncate(oldSize);
123 cleanups.forceCleanup(valuesToReload);
124 return;
125 }
126
127 // Spill any values that callers need after the scope is closed.
128 SmallVector<Address> tempAllocas;
129 for (mlir::Value *valPtr : valuesToReload) {
130 mlir::Value val = *valPtr;
131 if (!val) {
132 tempAllocas.push_back(Address::invalid());
133 continue;
134 }
135 Address temp = cgf.createDefaultAlignTempAlloca(val.getType(), val.getLoc(),
136 "tmp.exprcleanup");
137 tempAllocas.push_back(temp);
138 cgf.builder.createStore(val.getLoc(), val, temp);
139 }
140
141 // Pop any EH and lifetime-extended cleanups that were pushed during
142 // the expression (e.g. temporary destructors).
143 cleanups.forceCleanup();
144
145 // Make sure the cleanup scope body region has a terminator.
146 {
147 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
148 mlir::Block &lastBodyBlock = scope.getBodyRegion().back();
149 cgf.builder.setInsertionPointToEnd(&lastBodyBlock);
150 if (lastBodyBlock.empty() ||
151 !lastBodyBlock.back().hasTrait<mlir::OpTrait::IsTerminator>())
152 cgf.builder.createYield(scope.getLoc());
153 }
154
155 // Emit any deferred cleanups.
156 {
157 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
158 mlir::Block &cleanupBlock = scope.getCleanupRegion().front();
159 cgf.builder.setInsertionPointToEnd(&cleanupBlock);
160
161 if (hasDeferredCleanups) {
162 for (const PendingCleanupEntry &entry : llvm::reverse(llvm::make_range(
163 cgf.deferredConditionalCleanupStack.begin() + oldSize,
164 cgf.deferredConditionalCleanupStack.end()))) {
165 if (entry.activeFlag.isValid()) {
166 mlir::Value flag =
167 cgf.builder.createLoad(scope.getLoc(), entry.activeFlag);
168 cir::IfOp::create(
169 cgf.builder, scope.getLoc(), flag, /*withElseRegion=*/false,
170 [&](mlir::OpBuilder &b, mlir::Location loc) {
171 cgf.emitDestroy(entry.addr, entry.type, entry.destroyer);
172 cgf.builder.createYield(loc);
173 });
174 } else {
175 cgf.emitDestroy(entry.addr, entry.type, entry.destroyer);
176 }
177 }
178 }
179 cgf.builder.createYield(scope.getLoc());
180 }
181
182 cgf.deferredConditionalCleanupStack.truncate(oldSize);
183 cgf.builder.setInsertionPointAfter(scope);
184
185 // Reload spilled values now that the builder is after the closed scope.
186 for (auto [addr, valPtr] : llvm::zip(tempAllocas, valuesToReload)) {
187 if (!addr.isValid())
188 continue;
189 *valPtr = cgf.builder.createLoad(valPtr->getLoc(), addr);
190 }
191}
192
193//===----------------------------------------------------------------------===//
194// EHScopeStack
195//===----------------------------------------------------------------------===//
196
// Out-of-line definition of the virtual anchor method: the usual LLVM idiom
// that pins EHScopeStack::Cleanup's vtable to this translation unit.
void EHScopeStack::Cleanup::anchor() {}
198
201 stable_iterator si = getInnermostNormalCleanup();
202 stable_iterator se = stable_end();
203 while (si != se) {
204 EHCleanupScope &cleanup = llvm::cast<EHCleanupScope>(*find(si));
205 if (cleanup.isActive())
206 return si;
207 si = cleanup.getEnclosingNormalCleanup();
208 }
209 return stable_end();
210}
211
212/// Push an entry of the given size onto this protected-scope stack.
213char *EHScopeStack::allocate(size_t size) {
214 size = llvm::alignTo(size, ScopeStackAlignment);
215 if (!startOfBuffer) {
216 unsigned capacity = llvm::PowerOf2Ceil(std::max<size_t>(size, 1024ul));
217 startOfBuffer = std::make_unique<char[]>(capacity);
218 startOfData = endOfBuffer = startOfBuffer.get() + capacity;
219 } else if (static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
220 unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
221 unsigned usedCapacity =
222 currentCapacity - (startOfData - startOfBuffer.get());
223 unsigned requiredCapacity = usedCapacity + size;
224 // We know from the 'else if' condition that requiredCapacity is greater
225 // than currentCapacity.
226 unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);
227
228 std::unique_ptr<char[]> newStartOfBuffer =
229 std::make_unique<char[]>(newCapacity);
230 char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
231 char *newStartOfData = newEndOfBuffer - usedCapacity;
232 memcpy(newStartOfData, startOfData, usedCapacity);
233 startOfBuffer.swap(newStartOfBuffer);
234 endOfBuffer = newEndOfBuffer;
235 startOfData = newStartOfData;
236 }
237
238 assert(startOfBuffer.get() + size <= startOfData);
239 startOfData -= size;
240 return startOfData;
241}
242
243void EHScopeStack::deallocate(size_t size) {
244 startOfData += llvm::alignTo(size, ScopeStackAlignment);
245}
246
/// Push a new cleanup of the given kind onto the stack, reserving \p size
/// bytes of payload for the caller's Cleanup object.
///
/// When the cleanup needs code emission (normal cleanup, or EH cleanup with
/// exceptions enabled), this also creates a cir.cleanup.scope op and leaves
/// the builder's insertion point inside its body region, so subsequent IR for
/// the protected scope is emitted there. Returns a pointer to the payload
/// buffer where the caller constructs its Cleanup.
void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
  // Reserve room for the EHCleanupScope header plus the cleanup payload.
  char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
  bool isNormalCleanup = kind & NormalCleanup;
  bool isEHCleanup = kind & EHCleanup;
  bool isLifetimeMarker = kind & LifetimeMarker;
  bool skipCleanupScope = false;

  // Translate the requested kind into the cir.cleanup.scope op's kind,
  // taking -fno-exceptions into account.
  cir::CleanupKind cleanupKind = cir::CleanupKind::All;
  if (isEHCleanup && cgf->getLangOpts().Exceptions) {
    cleanupKind =
        isNormalCleanup ? cir::CleanupKind::All : cir::CleanupKind::EH;
  } else {
    // Exceptions are disabled (or no EH flag was requested). Drop the EH
    // flag so the scope entry stays consistent with the op's cleanup kind.
    isEHCleanup = false;
    if (isNormalCleanup)
      cleanupKind = cir::CleanupKind::Normal;
    else
      // Neither normal nor EH emission is needed, so don't create an op.
      skipCleanupScope = true;
  }

  cir::CleanupScopeOp cleanupScope = nullptr;
  if (!skipCleanupScope) {
    CIRGenBuilderTy &builder = cgf->getBuilder();
    mlir::Location loc = builder.getUnknownLoc();
    // Both regions are created empty here; terminators are added later.
    cleanupScope = cir::CleanupScopeOp::create(
        builder, loc, cleanupKind,
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // Terminations will be handled in popCleanup
        },
        /*cleanupBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // Terminations will be handled after emitting cleanup
        });

    // Deliberately no insertion guard: code for the protected scope must now
    // be emitted into the cleanup scope's body region.
    builder.setInsertionPointToEnd(&cleanupScope.getBodyRegion().back());
  }

  // Per C++ [except.terminate], it is implementation-defined whether none,
  // some, or all cleanups are called before std::terminate. Thus, when
  // terminate is the current EH scope, we may skip adding any EH cleanup
  // scopes.
  if (innermostEHScope != stable_end() &&
      find(innermostEHScope)->getKind() == EHScope::Terminate)
    isEHCleanup = false;

  // Placement-construct the scope header in the reserved buffer, linking it
  // to the previous innermost normal/EH cleanups.
  EHCleanupScope *scope = new (buffer)
      EHCleanupScope(isNormalCleanup, isEHCleanup, size, cleanupScope,
                     innermostNormalCleanup, innermostEHScope);

  if (isNormalCleanup)
    innermostNormalCleanup = stable_begin();

  if (isEHCleanup)
    innermostEHScope = stable_begin();

  if (isLifetimeMarker)
    cgf->cgm.errorNYI("push lifetime marker cleanup");

  // With Windows -EHa, Invoke llvm.seh.scope.begin() for EHCleanup
  if (cgf->getLangOpts().EHAsynch && isEHCleanup && !isLifetimeMarker &&
      cgf->getTarget().getCXXABI().isMicrosoft())
    cgf->cgm.errorNYI("push seh cleanup");

  // Hand back the payload area where the caller constructs its Cleanup.
  return scope->getCleanupBuffer();
}
314
316 assert(!empty() && "popping exception stack when not empty");
317
318 assert(isa<EHCleanupScope>(*begin()));
319 EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
320 innermostNormalCleanup = cleanup.getEnclosingNormalCleanup();
321 innermostEHScope = cleanup.getEnclosingEHScope();
322 deallocate(cleanup.getAllocatedSize());
323
324 cir::CleanupScopeOp cleanupScope = cleanup.getCleanupScopeOp();
325 if (cleanupScope) {
326 auto *block = &cleanupScope.getBodyRegion().back();
327 if (!block->mightHaveTerminator()) {
328 mlir::OpBuilder::InsertionGuard guard(cgf->getBuilder());
329 cgf->getBuilder().setInsertionPointToEnd(block);
330 cir::YieldOp::create(cgf->getBuilder(),
331 cgf->getBuilder().getUnknownLoc());
332 }
333 cgf->getBuilder().setInsertionPointAfter(cleanupScope);
334 }
335
336 // Destroy the cleanup.
337 cleanup.destroy();
338}
339
341 for (stable_iterator si = getInnermostEHScope(); si != stable_end();) {
342 if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si))) {
343 if (cleanup->isLifetimeMarker()) {
344 // Skip lifetime markers and continue from the enclosing EH scope
346 continue;
347 }
348 }
349 return true;
350 }
351 return false;
352}
353
354/// The given cleanup block is being deactivated. Configure a cleanup variable
355/// if necessary.
358 mlir::Operation *dominatingIP) {
360
361 assert((scope.isNormalCleanup() || scope.isEHCleanup()) &&
362 "cleanup block is neither normal nor EH?");
363
364 if (scope.isNormalCleanup())
366
367 if (scope.isEHCleanup())
369
370 CIRGenBuilderTy &builder = cgf.getBuilder();
371
372 // If the cleanup block doesn't exist yet, create it and set its initial
373 // value to `true`. If we are inside a conditional branch, the value must be
374 // initialized before the conditional branch begins.
375 Address var = scope.getActiveFlag();
376 if (!var.isValid()) {
377 mlir::Location loc = builder.getUnknownLoc();
378
380 loc, "cleanup.isactive");
381 scope.setActiveFlag(var);
382
383 assert(dominatingIP && "no existing variable and no dominating IP!");
384
385 if (cgf.isInConditionalBranch()) {
386 mlir::Value val = builder.getBool(true, loc);
387 cgf.setBeforeOutermostConditional(val, var);
388 } else {
389 mlir::OpBuilder::InsertionGuard guard(builder);
390 builder.setInsertionPoint(dominatingIP);
391 builder.createFlagStore(loc, true, var.getPointer());
392 }
393 }
394
395 // The code above sets the `isActive` flag to `true` as its initial state
396 // at the point where the variable is created. The code below sets it to
397 // `false` at the point where the cleanup is deactivated.
398 mlir::Location loc = builder.getUnknownLoc();
399 builder.createFlagStore(loc, false, var.getPointer());
400}
401
402/// Deactive a cleanup that was created in an active state.
404 mlir::Operation *dominatingIP) {
405 assert(c != ehStack.stable_end() && "deactivating bottom of stack?");
407 assert(scope.isActive() && "double deactivation");
408
409 // If it's the top of the stack, just pop it, but do so only if it belongs
410 // to the current RunCleanupsScope.
411 if (c == ehStack.stable_begin() &&
412 currentCleanupStackDepth.strictlyEncloses(c)) {
413 popCleanupBlock(/*forDeactivation=*/true);
414 return;
415 }
416
417 // Otherwise, follow the general case.
418 setupCleanupBlockDeactivation(*this, c, dominatingIP);
419
420 scope.setActive(false);
421}
422
423static void emitCleanup(CIRGenFunction &cgf, cir::CleanupScopeOp cleanupScope,
424 EHScopeStack::Cleanup *cleanup,
426 Address activeFlag) {
427 CIRGenBuilderTy &builder = cgf.getBuilder();
428 mlir::Block &block = cleanupScope.getCleanupRegion().back();
429
430 mlir::OpBuilder::InsertionGuard guard(builder);
431 builder.setInsertionPointToStart(&block);
432
433 // Ask the cleanup to emit itself.
434 assert(cgf.haveInsertPoint() && "expected insertion point");
435
436 if (activeFlag.isValid()) {
437 mlir::Location loc = cleanupScope.getLoc();
438 mlir::Value isActive = builder.createFlagLoad(loc, activeFlag.getPointer());
439 cir::IfOp::create(builder, loc, isActive,
440 /*withElseRegion=*/false,
441 /*thenBuilder=*/
442 [&](mlir::OpBuilder &, mlir::Location) {
443 cleanup->emit(cgf, flags);
444 assert(cgf.haveInsertPoint() &&
445 "cleanup ended with no insertion point?");
446 builder.createYield(loc);
447 });
448 } else {
449 cleanup->emit(cgf, flags);
450 assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
451 }
452
453 mlir::Block &cleanupRegionLastBlock = cleanupScope.getCleanupRegion().back();
454 if (cleanupRegionLastBlock.empty() ||
455 !cleanupRegionLastBlock.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
456 mlir::OpBuilder::InsertionGuard guardCase(builder);
457 builder.setInsertionPointToEnd(&cleanupRegionLastBlock);
458 builder.createYield(cleanupScope.getLoc());
459 }
460}
461
462/// Check whether a cleanup scope body contains any non-yield exits that branch
463/// through the cleanup. These exits branch through the cleanup and require
464/// the normal cleanup to be executed even when the cleanup has been
465/// deactivated.
466static bool bodyHasBranchThroughExits(mlir::Region &bodyRegion) {
467 return bodyRegion
468 .walk([&](mlir::Operation *op) {
470 return mlir::WalkResult::interrupt();
471 return mlir::WalkResult::advance();
472 })
473 .wasInterrupted();
474}
475
476/// Pop a cleanup block from the stack.
477///
478/// \param forDeactivation - When true, this indicates that the cleanup block
479/// is being popped because it was deactivated while at the top of the stack.
480void CIRGenFunction::popCleanupBlock(bool forDeactivation) {
481 assert(!ehStack.empty() && "cleanup stack is empty!");
482 assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
484
485 cir::CleanupScopeOp cleanupScope = scope.getCleanupScopeOp();
486 assert(cleanupScope && "CleanupScopeOp is nullptr");
487
488 bool requiresNormalCleanup = scope.isNormalCleanup();
489 bool requiresEHCleanup = scope.isEHCleanup();
490
491 // When we're popping a cleanup to deactivate it, we need to know if anything
492 // in the cleanup scope body region branches through the cleanup handler
493 // before the entire cleanup scope body has executed. If the cleanup scope
494 // body falls through, we don't want to emit normal cleanup code. However,
495 // if the cleanup body region contains early exits (return or goto), we do
496 // need to execute the normal cleanup when the early exit is taken. To handle
497 // that case, we guard the cleanup with an "active" flag so that it executes
498 // conditionally and set the flag to false when the cleanup body falls
499 // through. Classic codegen tracks this state with "hasBranches" and
500 // "getFixupDepth" on the cleanup scope, but because CIR uses structured
501 // control flow, we need to check for early exits and insert the active
502 // flag handling here. Note that when a cleanup is deactivated while not at
503 // the top of the stack, the active flag gets created in
504 // setupCleanupBlockDeactivation.
505 if (forDeactivation && requiresNormalCleanup) {
506 if (bodyHasBranchThroughExits(cleanupScope.getBodyRegion())) {
507 // The active flag shouldn't exist if the scope was at the top of the
508 // stack when it was deactivated.
509 assert(!scope.getActiveFlag().isValid() && "active flag already set");
510
511 // Create the flag.
512 mlir::Location loc = builder.getUnknownLoc();
514 builder.getBoolTy(), CharUnits::One(), loc, "cleanup.isactive");
515
516 // Initialize the flag to true before the cleanup scope (the point where
517 // the cleanup becomes active).
518 {
519 mlir::OpBuilder::InsertionGuard guard(builder);
520 builder.setInsertionPoint(cleanupScope);
521 builder.createFlagStore(loc, true, activeFlag.getPointer());
522 }
523
524 // Set the flag to false at the end of the cleanup scope body region.
525 assert(builder.getInsertionBlock() ==
526 &cleanupScope.getBodyRegion().back() &&
527 "expected insertion point in cleanup body");
528 builder.createFlagStore(loc, false, activeFlag.getPointer());
529
530 scope.setActiveFlag(activeFlag);
532 } else {
533 // If the cleanup was pushed on the stack as normal+eh, downgrade it to
534 // eh-only.
535 if (requiresEHCleanup)
536 cleanupScope.setCleanupKind(cir::CleanupKind::EH);
537 requiresNormalCleanup = false;
538 }
539 }
540
541 Address normalActiveFlag = scope.shouldTestFlagInNormalCleanup()
542 ? scope.getActiveFlag()
544 Address ehActiveFlag = scope.shouldTestFlagInEHCleanup()
545 ? scope.getActiveFlag()
547
548 // If we don't need the cleanup at all, we're done.
549 if (!requiresNormalCleanup && !requiresEHCleanup) {
550 // If we get here, the cleanup scope isn't needed. Rather than try to move
551 // the contents of its body region out of the cleanup and erase it, we just
552 // add a yield to the cleanup region to make it valid but no-op. It will be
553 // erased during canonicalization.
554 mlir::Block &cleanupBlock = cleanupScope.getCleanupRegion().back();
555 if (!cleanupBlock.mightHaveTerminator()) {
556 mlir::OpBuilder::InsertionGuard guard(builder);
557 builder.setInsertionPointToEnd(&cleanupBlock);
558 cir::YieldOp::create(builder, builder.getUnknownLoc());
559 }
560 ehStack.popCleanup();
561 return;
562 }
563
564 // Copy the cleanup emission data out. This uses either a stack
565 // array or malloc'd memory, depending on the size, which is
566 // behavior that SmallVector would provide, if we could use it
567 // here. Unfortunately, if you ask for a SmallVector<char>, the
568 // alignment isn't sufficient.
569 auto *cleanupSource = reinterpret_cast<char *>(scope.getCleanupBuffer());
571 cleanupBufferStack[8 * sizeof(void *)];
572 std::unique_ptr<char[]> cleanupBufferHeap;
573 size_t cleanupSize = scope.getCleanupSize();
575
576 // This is necessary because we are going to deallocate the cleanup
577 // (in popCleanup) before we emit it.
578 if (cleanupSize <= sizeof(cleanupBufferStack)) {
579 memcpy(cleanupBufferStack, cleanupSource, cleanupSize);
580 cleanup = reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferStack);
581 } else {
582 cleanupBufferHeap.reset(new char[cleanupSize]);
583 memcpy(cleanupBufferHeap.get(), cleanupSource, cleanupSize);
584 cleanup =
585 reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferHeap.get());
586 }
587
588 EHScopeStack::Cleanup::Flags cleanupFlags;
589 if (scope.isNormalCleanup())
590 cleanupFlags.setIsNormalCleanupKind();
591 if (scope.isEHCleanup())
592 cleanupFlags.setIsEHCleanupKind();
593
594 // Determine the active flag for the cleanup handler.
595 Address cleanupActiveFlag = normalActiveFlag.isValid() ? normalActiveFlag
596 : ehActiveFlag.isValid() ? ehActiveFlag
598
599 // In CIR, the cleanup code is emitted into the cleanup region of the
600 // cir.cleanup.scope op. There is no CFG threading needed — the FlattenCFG
601 // pass handles lowering the structured cleanup scope.
602 ehStack.popCleanup();
603 scope.markEmitted();
604 emitCleanup(*this, cleanupScope, cleanup, cleanupFlags, cleanupActiveFlag);
605}
606
607/// Pops cleanup blocks until the given savepoint is reached.
609 EHScopeStack::stable_iterator oldCleanupStackDepth,
610 ArrayRef<mlir::Value *> valuesToReload) {
611 // If the current stack depth is the same as the cleanup stack depth,
612 // we won't be exiting any cleanup scopes, so we don't need to reload
613 // any values.
614 bool requiresCleanup = false;
615 for (auto it = ehStack.begin(), ie = ehStack.find(oldCleanupStackDepth);
616 it != ie; ++it) {
617 if (isa<EHCleanupScope>(&*it)) {
618 requiresCleanup = true;
619 break;
620 }
621 }
622
623 // If there are values that we need to keep live, spill them now before
624 // we pop the cleanup blocks. These are passed as pointers to mlir::Value
625 // because we're going to replace them with the reloaded value.
626 SmallVector<Address> tempAllocas;
627 if (requiresCleanup) {
628 for (mlir::Value *valPtr : valuesToReload) {
629 mlir::Value val = *valPtr;
630 if (!val)
631 continue;
632
633 // TODO(cir): Check for static allocas.
634
635 Address temp = createDefaultAlignTempAlloca(val.getType(), val.getLoc(),
636 "tmp.exprcleanup");
637 tempAllocas.push_back(temp);
638 builder.createStore(val.getLoc(), val, temp);
639 }
640 }
641
642 // Pop cleanup blocks until we reach the base stack depth for the
643 // current scope.
644 while (ehStack.stable_begin() != oldCleanupStackDepth)
646
647 // Reload the values that we spilled, if necessary.
648 if (requiresCleanup) {
649 for (auto [addr, valPtr] : llvm::zip(tempAllocas, valuesToReload)) {
650 mlir::Location loc = valPtr->getLoc();
651 *valPtr = builder.createLoad(loc, addr);
652 }
653 }
654}
655
656/// Pops cleanup blocks until the given savepoint is reached, then add the
657/// cleanups from the given savepoint in the lifetime-extended cleanups stack.
659 EHScopeStack::stable_iterator oldCleanupStackDepth,
660 size_t oldLifetimeExtendedSize, ArrayRef<mlir::Value *> valuesToReload) {
661 popCleanupBlocks(oldCleanupStackDepth, valuesToReload);
662
663 // Promote deferred lifetime-extended cleanups onto the EH scope stack.
664 for (const PendingCleanupEntry &cleanup : llvm::make_range(
665 lifetimeExtendedCleanupStack.begin() + oldLifetimeExtendedSize,
668 lifetimeExtendedCleanupStack.truncate(oldLifetimeExtendedSize);
669}
static void setupCleanupBlockDeactivation(CIRGenFunction &cgf, EHScopeStack::stable_iterator c, mlir::Operation *dominatingIP)
The given cleanup block is being deactivated.
static bool bodyHasBranchThroughExits(mlir::Region &bodyRegion)
Check whether a cleanup scope body contains any non-yield exits that branch through the cleanup.
static void emitCleanup(CIRGenFunction &cgf, cir::CleanupScopeOp cleanupScope, EHScopeStack::Cleanup *cleanup, EHScopeStack::Cleanup::Flags flags, Address activeFlag)
static Decl::Kind getKind(const Decl *D)
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__DEVICE__ void * memcpy(void *__a, const void *__b, size_t __c)
__device__ __2f16 b
__device__ __2f16 float c
cir::ConstantOp getBool(bool state, mlir::Location loc)
cir::StoreOp createFlagStore(mlir::Location loc, bool val, mlir::Value dst)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
cir::LoadOp createFlagLoad(mlir::Location loc, mlir::Value addr)
Emit a load from an boolean flag variable.
cir::BoolType getBoolTy()
mlir::Value getPointer() const
Definition Address.h:96
static Address invalid()
Definition Address.h:74
bool isValid() const
Definition Address.h:75
FullExprCleanupScope(CIRGenFunction &cgf, const Expr *subExpr)
void exit(ArrayRef< mlir::Value * > valuesToReload={})
llvm::SmallVector< PendingCleanupEntry > lifetimeExtendedCleanupStack
mlir::Block * getCurFunctionEntryBlock()
void setBeforeOutermostConditional(mlir::Value value, Address addr)
ConditionalEvaluation * outermostConditional
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
llvm::SmallVector< PendingCleanupEntry > deferredConditionalCleanupStack
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
Address createCleanupActiveFlag()
Create an active flag variable for use with conditional cleanups.
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
bool haveInsertPoint() const
True if an insertion point is defined.
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
CIRGenBuilderTy & getBuilder()
void pushPendingCleanupToEHStack(const PendingCleanupEntry &entry)
Promote a single pending cleanup entry onto the EH scope stack.
void popCleanupBlock(bool forDeactivation=false)
Pop a cleanup block from the stack.
EHScopeStack::stable_iterator currentCleanupStackDepth
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates a alloca and inserts it into the entry block of the current region.
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
A cleanup scope which generates the cleanup blocks lazily.
cir::CleanupScopeOp getCleanupScopeOp()
static size_t getSizeForCleanupSize(size_t size)
Gets the size required for a lazy cleanup scope with the given cleanup-data requirements.
void setActiveFlag(Address var)
bool shouldTestFlagInNormalCleanup() const
void setActive(bool isActive)
Information for lazily generating a cleanup.
A saved depth on the scope stack.
void popCleanup()
Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
iterator find(stable_iterator savePoint) const
Turn a stable reference to a scope depth into a unstable pointer to the EH stack.
bool requiresCatchOrCleanup() const
stable_iterator getInnermostActiveNormalCleanup() const
Represents a C++ temporary.
Definition ExprCXX.h:1463
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
This represents one expression.
Definition Expr.h:112
A (possibly-)qualified type.
Definition TypeBase.h:937
A class that does preorder or postorder depth-first traversal on the entire Clang AST and visits each...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool emitLifetimeMarkers()
A cleanup entry that will be promoted onto the EH scope stack at a later point.