clang 22.0.0git
CIRGenCleanup.cpp
Go to the documentation of this file.
1//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains code dealing with the IR generation for cleanups
10// and related information.
11//
12// A "cleanup" is a piece of code which needs to be executed whenever
13// control transfers out of a particular scope. This can be
14// conditionalized to occur only on exceptional control flow, only on
15// normal control flow, or both.
16//
17//===----------------------------------------------------------------------===//
18
#include "CIRGenCleanup.h"
#include "CIRGenFunction.h"

#include "clang/CIR/MissingFeatures.h"
24using namespace clang;
25using namespace clang::CIRGen;
26
27//===----------------------------------------------------------------------===//
28// CIRGenFunction cleanup related
29//===----------------------------------------------------------------------===//
30
31/// Build a unconditional branch to the lexical scope cleanup block
32/// or with the labeled blocked if already solved.
33///
34/// Track on scope basis, goto's we need to fix later.
35cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location loc,
36 JumpDest dest) {
37 // Insert a branch: to the cleanup block (unsolved) or to the already
38 // materialized label. Keep track of unsolved goto's.
39 assert(dest.getBlock() && "assumes incoming valid dest");
40 auto brOp = cir::BrOp::create(builder, loc, dest.getBlock());
41
42 // Calculate the innermost active normal cleanup.
44 ehStack.getInnermostActiveNormalCleanup();
45
46 // If we're not in an active normal cleanup scope, or if the
47 // destination scope is within the innermost active normal cleanup
48 // scope, we don't need to worry about fixups.
49 if (topCleanup == ehStack.stable_end() ||
50 topCleanup.encloses(dest.getScopeDepth())) { // works for invalid
51 // FIXME(cir): should we clear insertion point here?
52 return brOp;
53 }
54
55 // If we can't resolve the destination cleanup scope, just add this
56 // to the current cleanup scope as a branch fixup.
57 if (!dest.getScopeDepth().isValid()) {
58 BranchFixup &fixup = ehStack.addBranchFixup();
59 fixup.destination = dest.getBlock();
60 fixup.destinationIndex = dest.getDestIndex();
61 fixup.initialBranch = brOp;
62 fixup.optimisticBranchBlock = nullptr;
63 // FIXME(cir): should we clear insertion point here?
64 return brOp;
65 }
66
67 cgm.errorNYI(loc, "emitBranchThroughCleanup: valid destination scope depth");
68 return brOp;
69}
70
71/// Emits all the code to cause the given temporary to be cleaned up.
73 QualType tempType, Address ptr) {
75}
76
77//===----------------------------------------------------------------------===//
78// EHScopeStack
79//===----------------------------------------------------------------------===//
80
81void EHScopeStack::Cleanup::anchor() {}
82
85 stable_iterator si = getInnermostNormalCleanup();
86 stable_iterator se = stable_end();
87 while (si != se) {
88 EHCleanupScope &cleanup = llvm::cast<EHCleanupScope>(*find(si));
89 if (cleanup.isActive())
90 return si;
91 si = cleanup.getEnclosingNormalCleanup();
92 }
93 return stable_end();
94}
95
96/// Push an entry of the given size onto this protected-scope stack.
97char *EHScopeStack::allocate(size_t size) {
98 size = llvm::alignTo(size, ScopeStackAlignment);
99 if (!startOfBuffer) {
100 unsigned capacity = llvm::PowerOf2Ceil(std::max(size, 1024ul));
101 startOfBuffer = std::make_unique<char[]>(capacity);
102 startOfData = endOfBuffer = startOfBuffer.get() + capacity;
103 } else if (static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
104 unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
105 unsigned usedCapacity =
106 currentCapacity - (startOfData - startOfBuffer.get());
107 unsigned requiredCapacity = usedCapacity + size;
108 // We know from the 'else if' condition that requiredCapacity is greater
109 // than currentCapacity.
110 unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);
111
112 std::unique_ptr<char[]> newStartOfBuffer =
113 std::make_unique<char[]>(newCapacity);
114 char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
115 char *newStartOfData = newEndOfBuffer - usedCapacity;
116 memcpy(newStartOfData, startOfData, usedCapacity);
117 startOfBuffer.swap(newStartOfBuffer);
118 endOfBuffer = newEndOfBuffer;
119 startOfData = newStartOfData;
120 }
121
122 assert(startOfBuffer.get() + size <= startOfData);
123 startOfData -= size;
124 return startOfData;
125}
126
127void EHScopeStack::deallocate(size_t size) {
128 startOfData += llvm::alignTo(size, ScopeStackAlignment);
129}
130
131/// Remove any 'null' fixups on the stack. However, we can't pop more
132/// fixups than the fixup depth on the innermost normal cleanup, or
133/// else fixups that we try to add to that cleanup will end up in the
134/// wrong place. We *could* try to shrink fixup depths, but that's
135/// actually a lot of work for little benefit.
137 // We expect this to only be called when there's still an innermost
138 // normal cleanup; otherwise there really shouldn't be any fixups.
139 cgf->cgm.errorNYI("popNullFixups");
140}
141
142void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
143 char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
144 bool isNormalCleanup = kind & NormalCleanup;
145 bool isEHCleanup = kind & EHCleanup;
146 bool isLifetimeMarker = kind & LifetimeMarker;
147
149
150 EHCleanupScope *scope = new (buffer) EHCleanupScope(
151 size, branchFixups.size(), innermostNormalCleanup, innermostEHScope);
152
153 if (isNormalCleanup)
154 innermostNormalCleanup = stable_begin();
155
156 if (isLifetimeMarker)
157 cgf->cgm.errorNYI("push lifetime marker cleanup");
158
159 // With Windows -EHa, Invoke llvm.seh.scope.begin() for EHCleanup
160 if (cgf->getLangOpts().EHAsynch && isEHCleanup && !isLifetimeMarker &&
161 cgf->getTarget().getCXXABI().isMicrosoft())
162 cgf->cgm.errorNYI("push seh cleanup");
163
164 return scope->getCleanupBuffer();
165}
166
168 assert(!empty() && "popping exception stack when not empty");
169
170 assert(isa<EHCleanupScope>(*begin()));
171 EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
172 innermostNormalCleanup = cleanup.getEnclosingNormalCleanup();
173 deallocate(cleanup.getAllocatedSize());
174
175 // Destroy the cleanup.
176 cleanup.destroy();
177
178 // Check whether we can shrink the branch-fixups stack.
179 if (!branchFixups.empty()) {
180 // If we no longer have any normal cleanups, all the fixups are
181 // complete.
182 if (!hasNormalCleanups()) {
183 branchFixups.clear();
184 } else {
185 // Otherwise we can still trim out unnecessary nulls.
187 }
188 }
189}
190
192 for (stable_iterator si = getInnermostEHScope(); si != stable_end();) {
193 if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si))) {
194 if (cleanup->isLifetimeMarker()) {
195 // Skip lifetime markers and continue from the enclosing EH scope
197 continue;
198 }
199 }
200 return true;
201 }
202 return false;
203}
204
205EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
206 char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
207 EHCatchScope *scope =
208 new (buffer) EHCatchScope(numHandlers, innermostEHScope);
209 innermostEHScope = stable_begin();
210 return scope;
211}
212
213static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
214 // Ask the cleanup to emit itself.
215 assert(cgf.haveInsertPoint() && "expected insertion point");
217 cleanup->emit(cgf);
218 assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
219}
220
221static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
222 EHCleanupScope &scope) {
223 assert(scope.isNormalCleanup());
224 mlir::Block *entry = scope.getNormalBlock();
225 if (!entry) {
226 mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
228 scope.setNormalBlock(entry);
229 }
230 return entry;
231}
232
233/// Pops a cleanup block. If the block includes a normal cleanup, the
234/// current insertion point is threaded through the cleanup, as are
235/// any branch fixups on the cleanup.
237 assert(!ehStack.empty() && "cleanup stack is empty!");
238 assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
240 assert(scope.getFixupDepth() <= ehStack.getNumBranchFixups());
241
242 // Remember activation information.
243 bool isActive = scope.isActive();
244
245 // - whether there are branch fix-ups through this cleanup
246 unsigned fixupDepth = scope.getFixupDepth();
247 bool hasFixups = ehStack.getNumBranchFixups() != fixupDepth;
248
249 // - whether there's a fallthrough
250 mlir::Block *fallthroughSource = builder.getInsertionBlock();
251 bool hasFallthrough = fallthroughSource != nullptr && isActive;
252
253 bool requiresNormalCleanup =
254 scope.isNormalCleanup() && (hasFixups || hasFallthrough);
255
256 // If we don't need the cleanup at all, we're done.
258 if (!requiresNormalCleanup) {
259 ehStack.popCleanup();
260 return;
261 }
262
263 // Copy the cleanup emission data out. This uses either a stack
264 // array or malloc'd memory, depending on the size, which is
265 // behavior that SmallVector would provide, if we could use it
266 // here. Unfortunately, if you ask for a SmallVector<char>, the
267 // alignment isn't sufficient.
268 auto *cleanupSource = reinterpret_cast<char *>(scope.getCleanupBuffer());
270 cleanupBufferStack[8 * sizeof(void *)];
271 std::unique_ptr<char[]> cleanupBufferHeap;
272 size_t cleanupSize = scope.getCleanupSize();
274
275 // This is necessary because we are going to deallocate the cleanup
276 // (in popCleanup) before we emit it.
277 if (cleanupSize <= sizeof(cleanupBufferStack)) {
278 memcpy(cleanupBufferStack, cleanupSource, cleanupSize);
279 cleanup = reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferStack);
280 } else {
281 cleanupBufferHeap.reset(new char[cleanupSize]);
282 memcpy(cleanupBufferHeap.get(), cleanupSource, cleanupSize);
283 cleanup =
284 reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferHeap.get());
285 }
286
288
289 // If we have a fallthrough and no other need for the cleanup,
290 // emit it directly.
291 if (hasFallthrough && !hasFixups) {
293 ehStack.popCleanup();
294 scope.markEmitted();
295 emitCleanup(*this, cleanup);
296 } else {
297 // Otherwise, the best approach is to thread everything through
298 // the cleanup block and then try to clean up after ourselves.
299
300 // Force the entry block to exist.
301 mlir::Block *normalEntry = createNormalEntry(*this, scope);
302
303 // I. Set up the fallthrough edge in.
304 mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
305
306 // If there's a fallthrough, we need to store the cleanup
307 // destination index. For fall-throughs this is always zero.
308 if (hasFallthrough) {
310
311 } else if (fallthroughSource) {
312 // Otherwise, save and clear the IP if we don't have fallthrough
313 // because the cleanup is inactive.
314 assert(!isActive && "source without fallthrough for active cleanup");
315 savedInactiveFallthroughIP = builder.saveInsertionPoint();
316 }
317
318 // II. Emit the entry block. This implicitly branches to it if
319 // we have fallthrough. All the fixups and existing branches
320 // should already be branched to it.
321 builder.setInsertionPointToEnd(normalEntry);
322
323 // intercept normal cleanup to mark SEH scope end
325
326 // III. Figure out where we're going and build the cleanup
327 // epilogue.
328 bool hasEnclosingCleanups =
329 (scope.getEnclosingNormalCleanup() != ehStack.stable_end());
330
331 // Compute the branch-through dest if we need it:
332 // - if there are branch-throughs threaded through the scope
333 // - if fall-through is a branch-through
334 // - if there are fixups that will be optimistically forwarded
335 // to the enclosing cleanup
337 if (hasFixups && hasEnclosingCleanups)
338 cgm.errorNYI("cleanup branch-through dest");
339
340 mlir::Block *fallthroughDest = nullptr;
341
342 // If there's exactly one branch-after and no other threads,
343 // we can route it without a switch.
344 // Skip for SEH, since ExitSwitch is used to generate code to indicate
345 // abnormal termination. (SEH: Except _leave and fall-through at
346 // the end, all other exits in a _try (return/goto/continue/break)
347 // are considered as abnormal terminations, using NormalCleanupDestSlot
348 // to indicate abnormal termination)
351
352 // IV. Pop the cleanup and emit it.
353 scope.markEmitted();
354 ehStack.popCleanup();
355 assert(ehStack.hasNormalCleanups() == hasEnclosingCleanups);
356
357 emitCleanup(*this, cleanup);
358
359 // Append the prepared cleanup prologue from above.
361
362 // Optimistically hope that any fixups will continue falling through.
363 if (fixupDepth != ehStack.getNumBranchFixups())
364 cgm.errorNYI("cleanup fixup depth mismatch");
365
366 // V. Set up the fallthrough edge out.
367
368 // Case 1: a fallthrough source exists but doesn't branch to the
369 // cleanup because the cleanup is inactive.
370 if (!hasFallthrough && fallthroughSource) {
371 // Prebranched fallthrough was forwarded earlier.
372 // Non-prebranched fallthrough doesn't need to be forwarded.
373 // Either way, all we need to do is restore the IP we cleared before.
374 assert(!isActive);
375 cgm.errorNYI("cleanup inactive fallthrough");
376
377 // Case 2: a fallthrough source exists and should branch to the
378 // cleanup, but we're not supposed to branch through to the next
379 // cleanup.
380 } else if (hasFallthrough && fallthroughDest) {
381 cgm.errorNYI("cleanup fallthrough destination");
382
383 // Case 3: a fallthrough source exists and should branch to the
384 // cleanup and then through to the next.
385 } else if (hasFallthrough) {
386 // Everything is already set up for this.
387
388 // Case 4: no fallthrough source exists.
389 } else {
390 // FIXME(cir): should we clear insertion point here?
391 }
392
393 // VI. Assorted cleaning.
394
395 // Check whether we can merge NormalEntry into a single predecessor.
396 // This might invalidate (non-IR) pointers to NormalEntry.
397 //
398 // If it did invalidate those pointers, and normalEntry was the same
399 // as NormalExit, go back and patch up the fixups.
401 }
402}
403
404/// Pops cleanup blocks until the given savepoint is reached.
406 EHScopeStack::stable_iterator oldCleanupStackDepth) {
408
409 // Pop cleanup blocks until we reach the base stack depth for the
410 // current scope.
411 while (ehStack.stable_begin() != oldCleanupStackDepth) {
413 }
414}
static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup)
static mlir::Block * createNormalEntry(CIRGenFunction &cgf, EHCleanupScope &scope)
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
__DEVICE__ void * memcpy(void *__a, const void *__b, size_t __c)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
bool haveInsertPoint() const
True if an insertion point is defined.
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest)
Build an unconditional branch to the lexical scope cleanup block, or to the labeled block if it has already been resolved.
CIRGenBuilderTy & getBuilder()
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth)
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
void popCleanupBlock()
Pops a cleanup block.
static size_t getSizeForNumHandlers(unsigned n)
A cleanup scope which generates the cleanup blocks lazily.
mlir::Block * getNormalBlock() const
static size_t getSizeForCleanupSize(size_t size)
Gets the size required for a lazy cleanup scope with the given cleanup-data requirements.
EHScopeStack::stable_iterator getEnclosingNormalCleanup() const
void setNormalBlock(mlir::Block *bb)
Information for lazily generating a cleanup.
A saved depth on the scope stack.
bool encloses(stable_iterator other) const
Returns true if this scope encloses I.
void popCleanup()
Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
iterator find(stable_iterator savePoint) const
Turn a stable reference to a scope depth into a unstable pointer to the EH stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
bool empty() const
Determines whether the exception-scopes stack is empty.
void popNullFixups()
Pops lazily-removed fixups from the end of the list.
class EHCatchScope * pushCatch(unsigned numHandlers)
Push a set of catch handlers on the stack.
bool requiresCatchOrCleanup() const
stable_iterator getInnermostActiveNormalCleanup() const
stable_iterator getInnermostEHScope() const
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
iterator begin() const
Returns an iterator pointing to the innermost EH scope.
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Represents a C++ temporary.
Definition ExprCXX.h:1459
A (possibly-)qualified type.
Definition TypeBase.h:937
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw statement, for example).
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the end of the scope).
unsigned kind
All of the diagnostics that can be emitted by the frontend.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool cleanupBranchThrough()
static bool emitLifetimeMarkers()
static bool ehCleanupFlags()
static bool ehCleanupScopeRequiresEHCleanup()
static bool ehstackBranches()
static bool innermostEHScope()
static bool ehCleanupHasPrebranchedFallthrough()
static bool cleanupAppendInsts()
static bool simplifyCleanupEntry()
cir::BrOp initialBranch
The initial branch of the fixup.
mlir::Block * destination
The ultimate destination of the branch.
mlir::Block * optimisticBranchBlock
The block containing the terminator which needs to be modified into a switch if this fixup is resolved.
unsigned destinationIndex
The destination index value.
mlir::Block * getOrCreateCleanupBlock(mlir::OpBuilder &builder)