clang 23.0.0git
CIRGenCleanup.cpp
Go to the documentation of this file.
//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope. This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
//
//===----------------------------------------------------------------------===//
18
#include "CIRGenCleanup.h"
#include "CIRGenFunction.h"

#include "clang/CIR/MissingFeatures.h"

using namespace clang;
using namespace clang::CIRGen;
26
//===----------------------------------------------------------------------===//
// CIRGenFunction cleanup related
//===----------------------------------------------------------------------===//
30
31/// Emits all the code to cause the given temporary to be cleaned up.
33 QualType tempType, Address ptr) {
35}
36
//===----------------------------------------------------------------------===//
// EHScopeStack
//===----------------------------------------------------------------------===//
40
41void EHScopeStack::Cleanup::anchor() {}
42
45 stable_iterator si = getInnermostNormalCleanup();
46 stable_iterator se = stable_end();
47 while (si != se) {
48 EHCleanupScope &cleanup = llvm::cast<EHCleanupScope>(*find(si));
49 if (cleanup.isActive())
50 return si;
51 si = cleanup.getEnclosingNormalCleanup();
52 }
53 return stable_end();
54}
55
56/// Push an entry of the given size onto this protected-scope stack.
57char *EHScopeStack::allocate(size_t size) {
58 size = llvm::alignTo(size, ScopeStackAlignment);
59 if (!startOfBuffer) {
60 unsigned capacity = llvm::PowerOf2Ceil(std::max<size_t>(size, 1024ul));
61 startOfBuffer = std::make_unique<char[]>(capacity);
62 startOfData = endOfBuffer = startOfBuffer.get() + capacity;
63 } else if (static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
64 unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
65 unsigned usedCapacity =
66 currentCapacity - (startOfData - startOfBuffer.get());
67 unsigned requiredCapacity = usedCapacity + size;
68 // We know from the 'else if' condition that requiredCapacity is greater
69 // than currentCapacity.
70 unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);
71
72 std::unique_ptr<char[]> newStartOfBuffer =
73 std::make_unique<char[]>(newCapacity);
74 char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
75 char *newStartOfData = newEndOfBuffer - usedCapacity;
76 memcpy(newStartOfData, startOfData, usedCapacity);
77 startOfBuffer.swap(newStartOfBuffer);
78 endOfBuffer = newEndOfBuffer;
79 startOfData = newStartOfData;
80 }
81
82 assert(startOfBuffer.get() + size <= startOfData);
83 startOfData -= size;
84 return startOfData;
85}
86
87void EHScopeStack::deallocate(size_t size) {
88 startOfData += llvm::alignTo(size, ScopeStackAlignment);
89}
90
91void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
92 char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
93 bool isNormalCleanup = kind & NormalCleanup;
94 bool isEHCleanup = kind & EHCleanup;
95 bool isLifetimeMarker = kind & LifetimeMarker;
96 bool skipCleanupScope = false;
97
98 cir::CleanupKind cleanupKind = cir::CleanupKind::All;
99 if (isEHCleanup && cgf->getLangOpts().Exceptions) {
100 cleanupKind =
101 isNormalCleanup ? cir::CleanupKind::All : cir::CleanupKind::EH;
102 } else {
103 if (isNormalCleanup)
104 cleanupKind = cir::CleanupKind::Normal;
105 else
106 skipCleanupScope = true;
107 }
108
109 cir::CleanupScopeOp cleanupScope = nullptr;
110 if (!skipCleanupScope) {
111 CIRGenBuilderTy &builder = cgf->getBuilder();
112 mlir::Location loc = builder.getUnknownLoc();
113 cleanupScope = cir::CleanupScopeOp::create(
114 builder, loc, cleanupKind,
115 /*bodyBuilder=*/
116 [&](mlir::OpBuilder &b, mlir::Location loc) {
117 // Terminations will be handled in popCleanup
118 },
119 /*cleanupBuilder=*/
120 [&](mlir::OpBuilder &b, mlir::Location loc) {
121 // Terminations will be handled after emiting cleanup
122 });
123
124 builder.setInsertionPointToEnd(&cleanupScope.getBodyRegion().back());
125 }
126
127 // Per C++ [except.terminate], it is implementation-defined whether none,
128 // some, or all cleanups are called before std::terminate. Thus, when
129 // terminate is the current EH scope, we may skip adding any EH cleanup
130 // scopes.
131 if (innermostEHScope != stable_end() &&
132 find(innermostEHScope)->getKind() == EHScope::Terminate)
133 isEHCleanup = false;
134
135 EHCleanupScope *scope = new (buffer)
136 EHCleanupScope(isNormalCleanup, isEHCleanup, size, cleanupScope,
137 innermostNormalCleanup, innermostEHScope);
138
139 if (isNormalCleanup)
140 innermostNormalCleanup = stable_begin();
141
142 if (isEHCleanup)
143 innermostEHScope = stable_begin();
144
145 if (isLifetimeMarker)
146 cgf->cgm.errorNYI("push lifetime marker cleanup");
147
148 // With Windows -EHa, Invoke llvm.seh.scope.begin() for EHCleanup
149 if (cgf->getLangOpts().EHAsynch && isEHCleanup && !isLifetimeMarker &&
150 cgf->getTarget().getCXXABI().isMicrosoft())
151 cgf->cgm.errorNYI("push seh cleanup");
152
153 return scope->getCleanupBuffer();
154}
155
157 assert(!empty() && "popping exception stack when not empty");
158
159 assert(isa<EHCleanupScope>(*begin()));
160 EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
161 innermostNormalCleanup = cleanup.getEnclosingNormalCleanup();
162 innermostEHScope = cleanup.getEnclosingEHScope();
163 deallocate(cleanup.getAllocatedSize());
164
165 cir::CleanupScopeOp cleanupScope = cleanup.getCleanupScopeOp();
166 if (cleanupScope) {
167 auto *block = &cleanupScope.getBodyRegion().back();
168 if (!block->mightHaveTerminator()) {
169 mlir::OpBuilder::InsertionGuard guard(cgf->getBuilder());
170 cgf->getBuilder().setInsertionPointToEnd(block);
171 cir::YieldOp::create(cgf->getBuilder(),
172 cgf->getBuilder().getUnknownLoc());
173 }
174 cgf->getBuilder().setInsertionPointAfter(cleanupScope);
175 }
176
177 // Destroy the cleanup.
178 cleanup.destroy();
179}
180
182 for (stable_iterator si = getInnermostEHScope(); si != stable_end();) {
183 if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si))) {
184 if (cleanup->isLifetimeMarker()) {
185 // Skip lifetime markers and continue from the enclosing EH scope
187 continue;
188 }
189 }
190 return true;
191 }
192 return false;
193}
194
195/// Deactive a cleanup that was created in an active state.
197 mlir::Operation *dominatingIP) {
198 assert(c != ehStack.stable_end() && "deactivating bottom of stack?");
200 assert(scope.isActive() && "double deactivation");
201
202 // If it's the top of the stack, just pop it, but do so only if it belongs
203 // to the current RunCleanupsScope.
204 if (c == ehStack.stable_begin() &&
205 currentCleanupStackDepth.strictlyEncloses(c)) {
207 return;
208 }
209
210 // Otherwise, follow the general case.
211 cgm.errorNYI("deactivateCleanupBlock: setupCleanupBlockActivation");
212}
213
214static void emitCleanup(CIRGenFunction &cgf, cir::CleanupScopeOp cleanupScope,
215 EHScopeStack::Cleanup *cleanup,
217 CIRGenBuilderTy &builder = cgf.getBuilder();
218 mlir::Block &block = cleanupScope.getCleanupRegion().back();
219
220 mlir::OpBuilder::InsertionGuard guard(builder);
221 builder.setInsertionPointToStart(&block);
222
223 // Ask the cleanup to emit itself.
224 assert(cgf.haveInsertPoint() && "expected insertion point");
226 cleanup->emit(cgf, flags);
227 assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
228
229 mlir::Block &cleanupRegionLastBlock = cleanupScope.getCleanupRegion().back();
230 if (cleanupRegionLastBlock.empty() ||
231 !cleanupRegionLastBlock.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
232 mlir::OpBuilder::InsertionGuard guardCase(builder);
233 builder.setInsertionPointToEnd(&cleanupRegionLastBlock);
234 builder.createYield(cleanupScope.getLoc());
235 }
236}
237
238static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
239 EHCleanupScope &scope) {
240 assert(scope.isNormalCleanup());
241 mlir::Block *entry = scope.getNormalBlock();
242 if (!entry) {
243 mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
245 scope.setNormalBlock(entry);
246 }
247 return entry;
248}
249
251 assert(!ehStack.empty() && "cleanup stack is empty!");
252 assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
254
255 cir::CleanupScopeOp cleanupScope = scope.getCleanupScopeOp();
256 assert(cleanupScope && "CleanupScopeOp is nullptr");
257
258 // Remember activation information.
259 bool isActive = scope.isActive();
260
261 // - whether there's a fallthrough
262 mlir::Block *fallthroughSource = builder.getInsertionBlock();
263 bool hasFallthrough = fallthroughSource != nullptr && isActive;
264
265 bool requiresNormalCleanup = scope.isNormalCleanup() && hasFallthrough;
266 bool requiresEHCleanup = scope.isEHCleanup() && hasFallthrough;
267
268 // If we don't need the cleanup at all, we're done.
270 if (!requiresNormalCleanup && !requiresEHCleanup) {
271 ehStack.popCleanup();
272 return;
273 }
274
275 // Copy the cleanup emission data out. This uses either a stack
276 // array or malloc'd memory, depending on the size, which is
277 // behavior that SmallVector would provide, if we could use it
278 // here. Unfortunately, if you ask for a SmallVector<char>, the
279 // alignment isn't sufficient.
280 auto *cleanupSource = reinterpret_cast<char *>(scope.getCleanupBuffer());
282 cleanupBufferStack[8 * sizeof(void *)];
283 std::unique_ptr<char[]> cleanupBufferHeap;
284 size_t cleanupSize = scope.getCleanupSize();
286
287 // This is necessary because we are going to deallocate the cleanup
288 // (in popCleanup) before we emit it.
289 if (cleanupSize <= sizeof(cleanupBufferStack)) {
290 memcpy(cleanupBufferStack, cleanupSource, cleanupSize);
291 cleanup = reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferStack);
292 } else {
293 cleanupBufferHeap.reset(new char[cleanupSize]);
294 memcpy(cleanupBufferHeap.get(), cleanupSource, cleanupSize);
295 cleanup =
296 reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferHeap.get());
297 }
298
299 EHScopeStack::Cleanup::Flags cleanupFlags;
300 if (scope.isNormalCleanup())
301 cleanupFlags.setIsNormalCleanupKind();
302 if (scope.isEHCleanup())
303 cleanupFlags.setIsEHCleanupKind();
304
305 // If we have a fallthrough and no other need for the cleanup,
306 // emit it directly.
307 if (hasFallthrough) {
309 ehStack.popCleanup();
310 scope.markEmitted();
311 emitCleanup(*this, cleanupScope, cleanup, cleanupFlags);
312 } else {
313 // Otherwise, the best approach is to thread everything through
314 // the cleanup block and then try to clean up after ourselves.
315
316 // Force the entry block to exist.
317 mlir::Block *normalEntry = createNormalEntry(*this, scope);
318
319 // I. Set up the fallthrough edge in.
320 mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
321
322 // If we have a fallthrough source, but this cleanup is inactive,
323 // save and clear the IP.
324 if (!hasFallthrough && fallthroughSource) {
325 assert(!isActive && "source without fallthrough for active cleanup");
326 savedInactiveFallthroughIP = builder.saveInsertionPoint();
327 }
328
329 // II. Emit the entry block. This implicitly branches to it if
330 // we have fallthrough. All the fixups and existing branches
331 // should already be branched to it.
332 builder.setInsertionPointToEnd(normalEntry);
333
334 // intercept normal cleanup to mark SEH scope end
336
337 // III. Figure out where we're going and build the cleanup
338 // epilogue.
339 bool hasEnclosingCleanups =
340 (scope.getEnclosingNormalCleanup() != ehStack.stable_end());
341
342 // Compute the branch-through dest if we need it:
343 // - if there are branch-throughs threaded through the scope
344 // - if fall-through is a branch-through
345 // - if there are fixups that will be optimistically forwarded
346 // to the enclosing cleanup
348 if (hasEnclosingCleanups)
349 cgm.errorNYI("cleanup branch-through dest");
350
351 mlir::Block *fallthroughDest = nullptr;
352
353 // If there's exactly one branch-after and no other threads,
354 // we can route it without a switch.
355 // Skip for SEH, since ExitSwitch is used to generate code to indicate
356 // abnormal termination. (SEH: Except _leave and fall-through at
357 // the end, all other exits in a _try (return/goto/continue/break)
358 // are considered as abnormal terminations, using NormalCleanupDestSlot
359 // to indicate abnormal termination)
362
363 // IV. Pop the cleanup and emit it.
364 scope.markEmitted();
365 ehStack.popCleanup();
366 assert(ehStack.hasNormalCleanups() == hasEnclosingCleanups);
367 emitCleanup(*this, cleanupScope, cleanup, cleanupFlags);
368
369 // Append the prepared cleanup prologue from above.
371
372 // V. Set up the fallthrough edge out.
373
374 // Case 1: a fallthrough source exists but doesn't branch to the
375 // cleanup because the cleanup is inactive.
376 if (!hasFallthrough && fallthroughSource) {
377 // Prebranched fallthrough was forwarded earlier.
378 // Non-prebranched fallthrough doesn't need to be forwarded.
379 // Either way, all we need to do is restore the IP we cleared before.
380 assert(!isActive);
381 cgm.errorNYI("cleanup inactive fallthrough");
382
383 // Case 2: a fallthrough source exists and should branch to the
384 // cleanup, but we're not supposed to branch through to the next
385 // cleanup.
386 } else if (hasFallthrough && fallthroughDest) {
387 cgm.errorNYI("cleanup fallthrough destination");
388
389 // Case 3: a fallthrough source exists and should branch to the
390 // cleanup and then through to the next.
391 } else if (hasFallthrough) {
392 // Everything is already set up for this.
393
394 // Case 4: no fallthrough source exists.
395 } else {
396 // FIXME(cir): should we clear insertion point here?
397 }
398
399 // VI. Assorted cleaning.
400
401 // Check whether we can merge NormalEntry into a single predecessor.
402 // This might invalidate (non-IR) pointers to NormalEntry.
403 //
404 // If it did invalidate those pointers, and normalEntry was the same
405 // as NormalExit, go back and patch up the fixups.
407 }
408}
409
410/// Pops cleanup blocks until the given savepoint is reached.
412 EHScopeStack::stable_iterator oldCleanupStackDepth,
413 ArrayRef<mlir::Value *> valuesToReload) {
414 // If the current stack depth is the same as the cleanup stack depth,
415 // we won't be exiting any cleanup scopes, so we don't need to reload
416 // any values.
417 bool requiresCleanup = false;
418 for (auto it = ehStack.begin(), ie = ehStack.find(oldCleanupStackDepth);
419 it != ie; ++it) {
420 if (isa<EHCleanupScope>(&*it)) {
421 requiresCleanup = true;
422 break;
423 }
424 }
425
426 // If there are values that we need to keep live, spill them now before
427 // we pop the cleanup blocks. These are passed as pointers to mlir::Value
428 // because we're going to replace them with the reloaded value.
429 SmallVector<Address> tempAllocas;
430 if (requiresCleanup) {
431 for (mlir::Value *valPtr : valuesToReload) {
432 mlir::Value val = *valPtr;
433 if (!val)
434 continue;
435
436 // TODO(cir): Check for static allocas.
437
438 Address temp = createDefaultAlignTempAlloca(val.getType(), val.getLoc(),
439 "tmp.exprcleanup");
440 tempAllocas.push_back(temp);
441 builder.createStore(val.getLoc(), val, temp);
442 }
443 }
444
445 // Pop cleanup blocks until we reach the base stack depth for the
446 // current scope.
447 while (ehStack.stable_begin() != oldCleanupStackDepth)
449
450 // Reload the values that we spilled, if necessary.
451 if (requiresCleanup) {
452 for (auto [addr, valPtr] : llvm::zip(tempAllocas, valuesToReload)) {
453 mlir::Location loc = valPtr->getLoc();
454 *valPtr = builder.createLoad(loc, addr);
455 }
456 }
457}
static mlir::Block * createNormalEntry(CIRGenFunction &cgf, EHCleanupScope &scope)
static void emitCleanup(CIRGenFunction &cgf, cir::CleanupScopeOp cleanupScope, EHScopeStack::Cleanup *cleanup, EHScopeStack::Cleanup::Flags flags)
static Decl::Kind getKind(const Decl *D)
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__DEVICE__ void * memcpy(void *__a, const void *__b, size_t __c)
__device__ __2f16 b
__device__ __2f16 float c
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
bool haveInsertPoint() const
True if an insertion point is defined.
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
CIRGenBuilderTy & getBuilder()
EHScopeStack::stable_iterator currentCleanupStackDepth
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
A cleanup scope which generates the cleanup blocks lazily.
mlir::Block * getNormalBlock() const
cir::CleanupScopeOp getCleanupScopeOp()
static size_t getSizeForCleanupSize(size_t size)
Gets the size required for a lazy cleanup scope with the given cleanup-data requirements.
EHScopeStack::stable_iterator getEnclosingNormalCleanup() const
void setNormalBlock(mlir::Block *bb)
Information for lazily generating a cleanup.
A saved depth on the scope stack.
void popCleanup()
Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
iterator find(stable_iterator savePoint) const
Turn a stable reference to a scope depth into a unstable pointer to the EH stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
bool empty() const
Determines whether the exception-scopes stack is empty.
bool requiresCatchOrCleanup() const
stable_iterator getInnermostActiveNormalCleanup() const
stable_iterator getInnermostEHScope() const
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
iterator begin() const
Returns an iterator pointing to the innermost EH scope.
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Represents a C++ temporary.
Definition ExprCXX.h:1460
A (possibly-)qualified type.
Definition TypeBase.h:937
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool cleanupBranchThrough()
static bool ehCleanupActiveFlag()
static bool emitLifetimeMarkers()
static bool ehCleanupScopeRequiresEHCleanup()
static bool cleanupAppendInsts()
static bool simplifyCleanupEntry()
mlir::Block * getOrCreateCleanupBlock(mlir::OpBuilder &builder)