//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope. This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
//
//===----------------------------------------------------------------------===//

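// Illustration only (hypothetical types, not part of the original file): the
// canonical cleanup is a destructor, which must run on every path that leaves
// its scope, whether control falls off the end, returns early, or unwinds
// after a throw.
namespace cir_cleanup_example {
struct Guard {
  ~Guard(); // the "cleanup" CIRGen must emit on every scope exit
};
void mayThrow();
inline void f(bool early) {
  Guard g;
  if (early)
    return;   // normal control flow: ~Guard() runs before the return
  mayThrow(); // exceptional control flow: ~Guard() runs during unwinding
}             // fallthrough: ~Guard() runs at the closing brace
} // namespace cir_cleanup_example
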
19#include "CIRGenCleanup.h"
20#include "CIRGenFunction.h"
21
23
24using namespace clang;
25using namespace clang::CIRGen;
26
//===----------------------------------------------------------------------===//
// CIRGenFunction cleanup related
//===----------------------------------------------------------------------===//

/// Build an unconditional branch to the lexical scope cleanup block, or to
/// the labeled block if the destination has already been resolved.
///
/// Goto's that still need fixing up later are tracked on a per-scope basis.
cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location loc,
                                                   JumpDest dest) {
  // Insert a branch: to the cleanup block (unsolved) or to the already
  // materialized label. Keep track of unsolved goto's.
  assert(dest.getBlock() && "assumes incoming valid dest");
  auto brOp = cir::BrOp::create(builder, loc, dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator topCleanup =
      ehStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (topCleanup == ehStack.stable_end() ||
      topCleanup.encloses(dest.getScopeDepth())) { // works for invalid
    // FIXME(cir): should we clear insertion point here?
    return brOp;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!dest.getScopeDepth().isValid()) {
    BranchFixup &fixup = ehStack.addBranchFixup();
    fixup.destination = dest.getBlock();
    fixup.destinationIndex = dest.getDestIndex();
    fixup.initialBranch = brOp;
    fixup.optimisticBranchBlock = nullptr;
    // FIXME(cir): should we clear insertion point here?
    return brOp;
  }

  cgm.errorNYI(loc, "emitBranchThroughCleanup: valid destination scope depth");
  return brOp;
}
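
// Illustration only (hypothetical source, not part of this file): the
// branch-fixup path above corresponds to a goto whose destination lies
// outside the innermost active cleanup scope. Until the destination's scope
// depth is resolved, the branch is recorded as a BranchFixup and patched
// later.
namespace cir_fixup_example {
struct Guard {
  ~Guard();
};
inline int leaveScope(bool b) {
  {
    Guard guard; // scope with a cleanup
    if (b)
      goto done; // leaves the scope: must be threaded through ~Guard()
  }
done:
  return 0;
}
} // namespace cir_fixup_example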

/// Emits all the code to cause the given temporary to be cleaned up.
void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
                                      QualType tempType, Address ptr) {
  pushDestroy(QualType::DK_cxx_destructor, ptr, tempType);
}

//===----------------------------------------------------------------------===//
// EHScopeStack
//===----------------------------------------------------------------------===//

void EHScopeStack::Cleanup::anchor() {}

EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  stable_iterator si = getInnermostNormalCleanup();
  stable_iterator se = stable_end();
  while (si != se) {
    EHCleanupScope &cleanup = llvm::cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive())
      return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}

/// Push an entry of the given size onto this protected-scope stack.
char *EHScopeStack::allocate(size_t size) {
  size = llvm::alignTo(size, ScopeStackAlignment);
  if (!startOfBuffer) {
    unsigned capacity = llvm::PowerOf2Ceil(std::max<size_t>(size, 1024ul));
    startOfBuffer = std::make_unique<char[]>(capacity);
    startOfData = endOfBuffer = startOfBuffer.get() + capacity;
  } else if (static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
    unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
    unsigned usedCapacity =
        currentCapacity - (startOfData - startOfBuffer.get());
    unsigned requiredCapacity = usedCapacity + size;
    // We know from the 'else if' condition that requiredCapacity is greater
    // than currentCapacity.
    unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);

    std::unique_ptr<char[]> newStartOfBuffer =
        std::make_unique<char[]>(newCapacity);
    char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
    char *newStartOfData = newEndOfBuffer - usedCapacity;
    memcpy(newStartOfData, startOfData, usedCapacity);
    startOfBuffer.swap(newStartOfBuffer);
    endOfBuffer = newEndOfBuffer;
    startOfData = newStartOfData;
  }

  assert(startOfBuffer.get() + size <= startOfData);
  startOfData -= size;
  return startOfData;
}
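
// Illustration only (worked example of the growth arithmetic above): the
// buffer grows downward, so startOfData moves toward startOfBuffer on each
// push. With a 1024-byte buffer holding 1000 used bytes, a 200-byte request
// gives requiredCapacity = 1200 and llvm::PowerOf2Ceil(1200) == 2048, so the
// 1000 live bytes are recopied against the end of a new 2048-byte buffer.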

void EHScopeStack::deallocate(size_t size) {
  startOfData += llvm::alignTo(size, ScopeStackAlignment);
}

/// Remove any 'null' fixups on the stack. However, we can't pop more
/// fixups than the fixup depth on the innermost normal cleanup, or
/// else fixups that we try to add to that cleanup will end up in the
/// wrong place. We *could* try to shrink fixup depths, but that's
/// actually a lot of work for little benefit.
void EHScopeStack::popNullFixups() {
  // We expect this to only be called when there's still an innermost
  // normal cleanup; otherwise there really shouldn't be any fixups.
  cgf->cgm.errorNYI("popNullFixups");
}

void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
  char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
  bool isNormalCleanup = kind & NormalCleanup;
  bool isEHCleanup = kind & EHCleanup;
  bool isLifetimeMarker = kind & LifetimeMarker;

  assert(!cir::MissingFeatures::innermostEHScope());

  EHCleanupScope *scope = new (buffer) EHCleanupScope(
      size, branchFixups.size(), innermostNormalCleanup, innermostEHScope);

  if (isNormalCleanup)
    innermostNormalCleanup = stable_begin();

  if (isLifetimeMarker)
    cgf->cgm.errorNYI("push lifetime marker cleanup");

  // With Windows -EHa, Invoke llvm.seh.scope.begin() for EHCleanup
  if (cgf->getLangOpts().EHAsynch && isEHCleanup && !isLifetimeMarker &&
      cgf->getTarget().getCXXABI().isMicrosoft())
    cgf->cgm.errorNYI("push seh cleanup");

  return scope->getCleanupBuffer();
}
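
// Illustration only (hypothetical cleanup class; assumes a typed
// pushCleanup<T> helper mirroring classic CodeGen's EHScopeStack, which
// placement-news T into the buffer returned above):
//
//   struct CallDtorCleanup final : EHScopeStack::Cleanup {
//     Address addr;
//     CallDtorCleanup(Address addr) : addr(addr) {}
//     void emit(CIRGenFunction &cgf, Flags flags) override {
//       // emit the destructor call on `addr` here
//     }
//   };
//   cgf.ehStack.pushCleanup<CallDtorCleanup>(NormalAndEHCleanup, addr);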

void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when not empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
  innermostNormalCleanup = cleanup.getEnclosingNormalCleanup();
  deallocate(cleanup.getAllocatedSize());

  // Destroy the cleanup.
  cleanup.destroy();

  // Check whether we can shrink the branch-fixups stack.
  if (!branchFixups.empty()) {
    // If we no longer have any normal cleanups, all the fixups are
    // complete.
    if (!hasNormalCleanups()) {
      branchFixups.clear();
    } else {
      // Otherwise we can still trim out unnecessary nulls.
      popNullFixups();
    }
  }
}

bool EHScopeStack::requiresCatchOrCleanup() const {
  for (stable_iterator si = getInnermostEHScope(); si != stable_end();) {
    if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si))) {
      if (cleanup->isLifetimeMarker()) {
        // Skip lifetime markers and continue from the enclosing EH scope.
        si = cleanup->getEnclosingEHScope();
        continue;
      }
    }
    return true;
  }
  return false;
}

EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
  EHCatchScope *scope =
      new (buffer) EHCatchScope(numHandlers, innermostEHScope);
  innermostEHScope = stable_begin();
  return scope;
}
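
// Illustration only: a `try` with two catch clauses pushes a catch scope with
// numHandlers == 2, so the allocation above reserves
// EHCatchScope::getSizeForNumHandlers(2) bytes, the scope header plus one
// Handler slot per clause.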

static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup,
                        EHScopeStack::Cleanup::Flags flags) {
  // Ask the cleanup to emit itself.
  assert(cgf.haveInsertPoint() && "expected insertion point");
  assert(!cir::MissingFeatures::ehCleanupActiveFlag());
  cleanup->emit(cgf, flags);
  assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
}

static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
                                      EHCleanupScope &scope) {
  assert(scope.isNormalCleanup());
  mlir::Block *entry = scope.getNormalBlock();
  if (!entry) {
    mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
    entry = cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
    scope.setNormalBlock(entry);
  }
  return entry;
}

/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CIRGenFunction::popCleanupBlock() {
  assert(!ehStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
  EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.begin());
  assert(scope.getFixupDepth() <= ehStack.getNumBranchFixups());

  // Remember activation information.
  bool isActive = scope.isActive();

  // - whether there are branch fix-ups through this cleanup
  unsigned fixupDepth = scope.getFixupDepth();
  bool hasFixups = ehStack.getNumBranchFixups() != fixupDepth;

  // - whether there's a fallthrough
  mlir::Block *fallthroughSource = builder.getInsertionBlock();
  bool hasFallthrough = fallthroughSource != nullptr && isActive;

  bool requiresNormalCleanup =
      scope.isNormalCleanup() && (hasFixups || hasFallthrough);

  // If we don't need the cleanup at all, we're done.
  assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
  if (!requiresNormalCleanup) {
    ehStack.popCleanup();
    return;
  }

  // Copy the cleanup emission data out. This uses either a stack
  // array or malloc'd memory, depending on the size, which is
  // behavior that SmallVector would provide, if we could use it
  // here. Unfortunately, if you ask for a SmallVector<char>, the
  // alignment isn't sufficient.
  auto *cleanupSource = reinterpret_cast<char *>(scope.getCleanupBuffer());
  alignas(EHScopeStack::ScopeStackAlignment) char
      cleanupBufferStack[8 * sizeof(void *)];
  std::unique_ptr<char[]> cleanupBufferHeap;
  size_t cleanupSize = scope.getCleanupSize();
  EHScopeStack::Cleanup *cleanup;

  // This is necessary because we are going to deallocate the cleanup
  // (in popCleanup) before we emit it.
  if (cleanupSize <= sizeof(cleanupBufferStack)) {
    memcpy(cleanupBufferStack, cleanupSource, cleanupSize);
    cleanup = reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferStack);
  } else {
    cleanupBufferHeap.reset(new char[cleanupSize]);
    memcpy(cleanupBufferHeap.get(), cleanupSource, cleanupSize);
    cleanup =
        reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferHeap.get());
  }
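
  // Illustration: with 8-byte pointers the stack buffer above holds 64 bytes,
  // so small cleanups avoid a heap allocation; only larger cleanup payloads
  // take the cleanupBufferHeap path.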

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  // If we have a fallthrough and no other need for the cleanup,
  // emit it directly.
  if (hasFallthrough && !hasFixups) {
    assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
    ehStack.popCleanup();
    scope.markEmitted();
    emitCleanup(*this, cleanup, cleanupFlags);
  } else {
    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.

    // Force the entry block to exist.
    mlir::Block *normalEntry = createNormalEntry(*this, scope);

    // I. Set up the fallthrough edge in.
    mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;

    // If there's a fallthrough, we need to store the cleanup
    // destination index. For fall-throughs this is always zero.
    if (hasFallthrough) {
      assert(!cir::MissingFeatures::ehCleanupHasPrebranchedFallthrough());

    } else if (fallthroughSource) {
      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      assert(!isActive && "source without fallthrough for active cleanup");
      savedInactiveFallthroughIP = builder.saveInsertionPoint();
    }

    // II. Emit the entry block. This implicitly branches to it if
    // we have fallthrough. All the fixups and existing branches
    // should already be branched to it.
    builder.setInsertionPointToEnd(normalEntry);

    // intercept normal cleanup to mark SEH scope end

    // III. Figure out where we're going and build the cleanup
    // epilogue.
    bool hasEnclosingCleanups =
        (scope.getEnclosingNormalCleanup() != ehStack.stable_end());

    // Compute the branch-through dest if we need it:
    //   - if there are branch-throughs threaded through the scope
    //   - if fall-through is a branch-through
    //   - if there are fixups that will be optimistically forwarded
    //     to the enclosing cleanup
    assert(!cir::MissingFeatures::cleanupBranchThrough());
    if (hasFixups && hasEnclosingCleanups)
      cgm.errorNYI("cleanup branch-through dest");

    mlir::Block *fallthroughDest = nullptr;

    // If there's exactly one branch-after and no other threads,
    // we can route it without a switch.
    // Skip for SEH, since ExitSwitch is used to generate code to indicate
    // abnormal termination. (SEH: Except _leave and fall-through at
    // the end, all other exits in a _try (return/goto/continue/break)
    // are considered as abnormal terminations, using NormalCleanupDestSlot
    // to indicate abnormal termination)

    // IV. Pop the cleanup and emit it.
    scope.markEmitted();
    ehStack.popCleanup();
    assert(ehStack.hasNormalCleanups() == hasEnclosingCleanups);

    emitCleanup(*this, cleanup, cleanupFlags);

    // Append the prepared cleanup prologue from above.
    assert(!cir::MissingFeatures::cleanupAppendInsts());

    // Optimistically hope that any fixups will continue falling through.
    if (fixupDepth != ehStack.getNumBranchFixups())
      cgm.errorNYI("cleanup fixup depth mismatch");

    // V. Set up the fallthrough edge out.

    // Case 1: a fallthrough source exists but doesn't branch to the
    // cleanup because the cleanup is inactive.
    if (!hasFallthrough && fallthroughSource) {
      // Prebranched fallthrough was forwarded earlier.
      // Non-prebranched fallthrough doesn't need to be forwarded.
      // Either way, all we need to do is restore the IP we cleared before.
      assert(!isActive);
      cgm.errorNYI("cleanup inactive fallthrough");

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
    } else if (hasFallthrough && fallthroughDest) {
      cgm.errorNYI("cleanup fallthrough destination");

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
    } else if (hasFallthrough) {
      // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
    } else {
      // FIXME(cir): should we clear insertion point here?
    }

    // VI. Assorted cleaning.

    // Check whether we can merge NormalEntry into a single predecessor.
    // This might invalidate (non-IR) pointers to NormalEntry.
    //
    // If it did invalidate those pointers, and normalEntry was the same
    // as NormalExit, go back and patch up the fixups.
    assert(!cir::MissingFeatures::simplifyCleanupEntry());
  }
}

/// Pops cleanup blocks until the given savepoint is reached.
void CIRGenFunction::popCleanupBlocks(
    EHScopeStack::stable_iterator oldCleanupStackDepth) {
  assert(!cir::MissingFeatures::ehstackBranches());

  // Pop cleanup blocks until we reach the base stack depth for the
  // current scope.
  while (ehStack.stable_begin() != oldCleanupStackDepth) {
    popCleanupBlock();
  }
}
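
// Illustration only (hypothetical usage, mirroring classic CodeGen's
// RunCleanupsScope): a caller snapshots the stack depth when entering a scope
// and pops back to it on exit, emitting every cleanup pushed in between.
//
//   EHScopeStack::stable_iterator depth = cgf.ehStack.stable_begin();
//   // ... emit the scope body, which may push cleanups ...
//   cgf.popCleanupBlocks(depth);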