39 assert(dest.getBlock() &&
"assumes incoming valid dest");
40 auto brOp = cir::BrOp::create(builder, loc, dest.getBlock());
44 ehStack.getInnermostActiveNormalCleanup();
49 if (topCleanup ==
ehStack.stable_end() ||
50 topCleanup.
encloses(dest.getScopeDepth())) {
57 if (!dest.getScopeDepth().
isValid()) {
67 cgm.errorNYI(loc,
"emitBranchThroughCleanup: valid destination scope depth");
// Intentionally empty: out-of-line definition that pins Cleanup's vtable to
// this translation unit (the usual LLVM anchor idiom to silence
// -Wweak-vtables and avoid emitting the vtable in every TU).
81void EHScopeStack::Cleanup::anchor() {}
91 si =
cleanup.getEnclosingNormalCleanup();
97char *EHScopeStack::allocate(
size_t size) {
100 unsigned capacity = llvm::PowerOf2Ceil(std::max(size, 1024ul));
101 startOfBuffer = std::make_unique<char[]>(capacity);
102 startOfData = endOfBuffer = startOfBuffer.get() + capacity;
103 }
else if (
static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
104 unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
105 unsigned usedCapacity =
106 currentCapacity - (startOfData - startOfBuffer.get());
107 unsigned requiredCapacity = usedCapacity + size;
110 unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);
112 std::unique_ptr<char[]> newStartOfBuffer =
113 std::make_unique<char[]>(newCapacity);
114 char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
115 char *newStartOfData = newEndOfBuffer - usedCapacity;
116 memcpy(newStartOfData, startOfData, usedCapacity);
117 startOfBuffer.swap(newStartOfBuffer);
118 endOfBuffer = newEndOfBuffer;
119 startOfData = newStartOfData;
122 assert(startOfBuffer.get() + size <= startOfData);
127void EHScopeStack::deallocate(
size_t size) {
139 cgf->cgm.errorNYI(
"popNullFixups");
142void *EHScopeStack::pushCleanup(
CleanupKind kind,
size_t size) {
150 EHCleanupScope *scope =
new (buffer) EHCleanupScope(
151 size, branchFixups.size(), innermostNormalCleanup, innermostEHScope);
156 if (isLifetimeMarker)
157 cgf->cgm.errorNYI(
"push lifetime marker cleanup");
160 if (cgf->getLangOpts().EHAsynch && isEHCleanup && !isLifetimeMarker &&
161 cgf->getTarget().getCXXABI().isMicrosoft())
162 cgf->cgm.errorNYI(
"push seh cleanup");
168 assert(!
empty() &&
"popping exception stack when not empty");
172 innermostNormalCleanup =
cleanup.getEnclosingNormalCleanup();
173 deallocate(
cleanup.getAllocatedSize());
179 if (!branchFixups.empty()) {
183 branchFixups.clear();
193 if (
auto *
cleanup = dyn_cast<EHCleanupScope>(&*
find(si))) {
194 if (
cleanup->isLifetimeMarker()) {
207 EHCatchScope *scope =
208 new (buffer) EHCatchScope(numHandlers, innermostEHScope);
218 assert(cgf.
haveInsertPoint() &&
"cleanup ended with no insertion point?");
226 mlir::OpBuilder::InsertionGuard guard(cgf.
getBuilder());
237 assert(!
ehStack.empty() &&
"cleanup stack is empty!");
247 bool hasFixups =
ehStack.getNumBranchFixups() != fixupDepth;
250 mlir::Block *fallthroughSource = builder.getInsertionBlock();
251 bool hasFallthrough = fallthroughSource !=
nullptr && isActive;
253 bool requiresNormalCleanup =
258 if (!requiresNormalCleanup) {
270 cleanupBufferStack[8 *
sizeof(
void *)];
271 std::unique_ptr<char[]> cleanupBufferHeap;
277 if (cleanupSize <=
sizeof(cleanupBufferStack)) {
278 memcpy(cleanupBufferStack, cleanupSource, cleanupSize);
281 cleanupBufferHeap.reset(
new char[cleanupSize]);
282 memcpy(cleanupBufferHeap.get(), cleanupSource, cleanupSize);
291 if (hasFallthrough && !hasFixups) {
304 mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
308 if (hasFallthrough) {
311 }
else if (fallthroughSource) {
314 assert(!isActive &&
"source without fallthrough for active cleanup");
315 savedInactiveFallthroughIP = builder.saveInsertionPoint();
321 builder.setInsertionPointToEnd(normalEntry);
328 bool hasEnclosingCleanups =
337 if (hasFixups && hasEnclosingCleanups)
338 cgm.errorNYI(
"cleanup branch-through dest");
340 mlir::Block *fallthroughDest =
nullptr;
355 assert(
ehStack.hasNormalCleanups() == hasEnclosingCleanups);
363 if (fixupDepth !=
ehStack.getNumBranchFixups())
364 cgm.errorNYI(
"cleanup fixup depth mismatch");
370 if (!hasFallthrough && fallthroughSource) {
375 cgm.errorNYI(
"cleanup inactive fallthrough");
380 }
else if (hasFallthrough && fallthroughDest) {
381 cgm.errorNYI(
"cleanup fallthrough destination");
385 }
else if (hasFallthrough) {
411 while (
ehStack.stable_begin() != oldCleanupStackDepth) {
static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup)
static mlir::Block * createNormalEntry(CIRGenFunction &cgf, EHCleanupScope &scope)
__DEVICE__ void * memcpy(void *__a, const void *__b, size_t __c)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
static Destroyer destroyCXXObject
bool haveInsertPoint() const
True if an insertion point is defined.
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest)
Build an unconditional branch to the lexical scope cleanup block, or to the labeled block if already created.
CIRGenBuilderTy & getBuilder()
LexicalScope * curLexScope
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth)
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
void popCleanupBlock()
Pops a cleanup block.
static size_t getSizeForNumHandlers(unsigned n)
A cleanup scope which generates the cleanup blocks lazily.
mlir::Block * getNormalBlock() const
unsigned getFixupDepth() const
size_t getCleanupSize() const
static size_t getSizeForCleanupSize(size_t size)
Gets the size required for a lazy cleanup scope with the given cleanup-data requirements.
bool isNormalCleanup() const
EHScopeStack::stable_iterator getEnclosingNormalCleanup() const
void * getCleanupBuffer()
void setNormalBlock(mlir::Block *bb)
Information for lazily generating a cleanup.
A saved depth on the scope stack.
bool encloses(stable_iterator other) const
Returns true if this scope encloses the given iterator \p other.
void popCleanup()
Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
iterator find(stable_iterator savePoint) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
bool empty() const
Determines whether the exception-scopes stack is empty.
void popNullFixups()
Pops lazily-removed fixups from the end of the list.
class EHCatchScope * pushCatch(unsigned numHandlers)
Push a set of catch handlers on the stack.
bool requiresCatchOrCleanup() const
stable_iterator getInnermostActiveNormalCleanup() const
stable_iterator getInnermostEHScope() const
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
iterator begin() const
Returns an iterator pointing to the innermost EH scope.
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Represents a C++ temporary.
A (possibly-)qualified type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw statement leaving the scope).
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the end of the scope).
unsigned kind
All of the diagnostics that can be emitted by the frontend.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
U cast(CodeGen::Address addr)
static bool cleanupBranchThrough()
static bool emitLifetimeMarkers()
static bool ehCleanupFlags()
static bool ehCleanupScopeRequiresEHCleanup()
static bool ehstackBranches()
static bool innermostEHScope()
static bool ehCleanupHasPrebranchedFallthrough()
static bool cleanupAppendInsts()
static bool simplifyCleanupEntry()
cir::BrOp initialBranch
The initial branch of the fixup.
mlir::Block * destination
The ultimate destination of the branch.
mlir::Block * optimisticBranchBlock
The block containing the terminator which needs to be modified into a switch if this fixup is resolved.
unsigned destinationIndex
The destination index value.
mlir::Block * getOrCreateCleanupBlock(mlir::OpBuilder &builder)