// Deliberately empty out-of-line virtual method: the standard LLVM "anchor"
// idiom, which pins the class's vtable emission to this translation unit.
// NOTE(review): the leading "41" is residue from the extraction that produced
// this dump, not part of the original source line — confirm against upstream.
41void EHScopeStack::Cleanup::anchor() {}
51 si =
cleanup.getEnclosingNormalCleanup();
57char *EHScopeStack::allocate(
size_t size) {
60 unsigned capacity = llvm::PowerOf2Ceil(std::max<size_t>(size, 1024ul));
61 startOfBuffer = std::make_unique<char[]>(capacity);
62 startOfData = endOfBuffer = startOfBuffer.get() + capacity;
63 }
else if (
static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
64 unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
65 unsigned usedCapacity =
66 currentCapacity - (startOfData - startOfBuffer.get());
67 unsigned requiredCapacity = usedCapacity + size;
70 unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);
72 std::unique_ptr<char[]> newStartOfBuffer =
73 std::make_unique<char[]>(newCapacity);
74 char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
75 char *newStartOfData = newEndOfBuffer - usedCapacity;
76 memcpy(newStartOfData, startOfData, usedCapacity);
77 startOfBuffer.swap(newStartOfBuffer);
78 endOfBuffer = newEndOfBuffer;
79 startOfData = newStartOfData;
82 assert(startOfBuffer.get() + size <= startOfData);
87void EHScopeStack::deallocate(
size_t size) {
96 bool skipCleanupScope =
false;
98 cir::CleanupKind cleanupKind = cir::CleanupKind::All;
99 if (isEHCleanup && cgf->getLangOpts().Exceptions) {
101 isNormalCleanup ? cir::CleanupKind::All : cir::CleanupKind::EH;
104 cleanupKind = cir::CleanupKind::Normal;
106 skipCleanupScope =
true;
109 cir::CleanupScopeOp cleanupScope =
nullptr;
110 if (!skipCleanupScope) {
111 CIRGenBuilderTy &builder = cgf->getBuilder();
112 mlir::Location loc = builder.getUnknownLoc();
113 cleanupScope = cir::CleanupScopeOp::create(
114 builder, loc, cleanupKind,
116 [&](mlir::OpBuilder &
b, mlir::Location loc) {
120 [&](mlir::OpBuilder &
b, mlir::Location loc) {
124 builder.setInsertionPointToEnd(&cleanupScope.getBodyRegion().back());
135 EHCleanupScope *scope =
new (buffer)
136 EHCleanupScope(isNormalCleanup, isEHCleanup, size, cleanupScope,
137 innermostNormalCleanup, innermostEHScope);
145 if (isLifetimeMarker)
146 cgf->cgm.errorNYI(
"push lifetime marker cleanup");
149 if (cgf->getLangOpts().EHAsynch && isEHCleanup && !isLifetimeMarker &&
150 cgf->getTarget().getCXXABI().isMicrosoft())
151 cgf->cgm.errorNYI(
"push seh cleanup");
157 assert(!
empty() &&
"popping exception stack when not empty");
161 innermostNormalCleanup =
cleanup.getEnclosingNormalCleanup();
162 innermostEHScope =
cleanup.getEnclosingEHScope();
163 deallocate(
cleanup.getAllocatedSize());
165 cir::CleanupScopeOp cleanupScope =
cleanup.getCleanupScopeOp();
167 auto *block = &cleanupScope.getBodyRegion().back();
168 if (!block->mightHaveTerminator()) {
169 mlir::OpBuilder::InsertionGuard guard(cgf->getBuilder());
170 cgf->getBuilder().setInsertionPointToEnd(block);
171 cir::YieldOp::create(cgf->getBuilder(),
172 cgf->getBuilder().getUnknownLoc());
174 cgf->getBuilder().setInsertionPointAfter(cleanupScope);
183 if (
auto *
cleanup = dyn_cast<EHCleanupScope>(&*
find(si))) {
184 if (
cleanup->isLifetimeMarker()) {
197 mlir::Operation *dominatingIP) {
198 assert(
c !=
ehStack.stable_end() &&
"deactivating bottom of stack?");
200 assert(scope.
isActive() &&
"double deactivation");
211 cgm.errorNYI(
"deactivateCleanupBlock: setupCleanupBlockActivation");
215 EHScopeStack::Cleanup *
cleanup,
218 mlir::Block &block = cleanupScope.getCleanupRegion().back();
220 mlir::OpBuilder::InsertionGuard guard(builder);
221 builder.setInsertionPointToStart(&block);
227 assert(cgf.
haveInsertPoint() &&
"cleanup ended with no insertion point?");
229 mlir::Block &cleanupRegionLastBlock = cleanupScope.getCleanupRegion().back();
230 if (cleanupRegionLastBlock.empty() ||
231 !cleanupRegionLastBlock.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
232 mlir::OpBuilder::InsertionGuard guardCase(builder);
233 builder.setInsertionPointToEnd(&cleanupRegionLastBlock);
243 mlir::OpBuilder::InsertionGuard guard(cgf.
getBuilder());
251 assert(!
ehStack.empty() &&
"cleanup stack is empty!");
256 assert(cleanupScope &&
"CleanupScopeOp is nullptr");
262 mlir::Block *fallthroughSource = builder.getInsertionBlock();
263 bool hasFallthrough = fallthroughSource !=
nullptr && isActive;
265 bool requiresNormalCleanup = scope.
isNormalCleanup() && hasFallthrough;
266 bool requiresEHCleanup = scope.
isEHCleanup() && hasFallthrough;
270 if (!requiresNormalCleanup && !requiresEHCleanup) {
282 cleanupBufferStack[8 *
sizeof(
void *)];
283 std::unique_ptr<char[]> cleanupBufferHeap;
289 if (cleanupSize <=
sizeof(cleanupBufferStack)) {
290 memcpy(cleanupBufferStack, cleanupSource, cleanupSize);
293 cleanupBufferHeap.reset(
new char[cleanupSize]);
294 memcpy(cleanupBufferHeap.get(), cleanupSource, cleanupSize);
307 if (hasFallthrough) {
320 mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
324 if (!hasFallthrough && fallthroughSource) {
325 assert(!isActive &&
"source without fallthrough for active cleanup");
326 savedInactiveFallthroughIP = builder.saveInsertionPoint();
332 builder.setInsertionPointToEnd(normalEntry);
339 bool hasEnclosingCleanups =
348 if (hasEnclosingCleanups)
349 cgm.errorNYI(
"cleanup branch-through dest");
351 mlir::Block *fallthroughDest =
nullptr;
366 assert(
ehStack.hasNormalCleanups() == hasEnclosingCleanups);
376 if (!hasFallthrough && fallthroughSource) {
381 cgm.errorNYI(
"cleanup inactive fallthrough");
386 }
else if (hasFallthrough && fallthroughDest) {
387 cgm.errorNYI(
"cleanup fallthrough destination");
391 }
else if (hasFallthrough) {
417 bool requiresCleanup =
false;
418 for (
auto it =
ehStack.begin(), ie =
ehStack.find(oldCleanupStackDepth);
421 requiresCleanup =
true;
430 if (requiresCleanup) {
431 for (mlir::Value *valPtr : valuesToReload) {
432 mlir::Value val = *valPtr;
440 tempAllocas.push_back(temp);
441 builder.createStore(val.getLoc(), val, temp);
447 while (
ehStack.stable_begin() != oldCleanupStackDepth)
451 if (requiresCleanup) {
452 for (
auto [addr, valPtr] : llvm::zip(tempAllocas, valuesToReload)) {
453 mlir::Location loc = valPtr->getLoc();
454 *valPtr = builder.createLoad(loc, addr);
static mlir::Block * createNormalEntry(CIRGenFunction &cgf, EHCleanupScope &scope)
static void emitCleanup(CIRGenFunction &cgf, cir::CleanupScopeOp cleanupScope, EHScopeStack::Cleanup *cleanup, EHScopeStack::Cleanup::Flags flags)
static Decl::Kind getKind(const Decl *D)
A collection of selectors, each with an associated kind, forming an ordered collection of selectors; a selector has a kind.
__DEVICE__ void * memcpy(void *__a, const void *__b, size_t __c)
__device__ __2f16 float c
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
static Destroyer destroyCXXObject
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
bool haveInsertPoint() const
True if an insertion point is defined.
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
CIRGenBuilderTy & getBuilder()
EHScopeStack::stable_iterator currentCleanupStackDepth
LexicalScope * curLexScope
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
A cleanup scope which generates the cleanup blocks lazily.
mlir::Block * getNormalBlock() const
size_t getCleanupSize() const
cir::CleanupScopeOp getCleanupScopeOp()
static size_t getSizeForCleanupSize(size_t size)
Gets the size required for a lazy cleanup scope with the given cleanup-data requirements.
bool isNormalCleanup() const
EHScopeStack::stable_iterator getEnclosingNormalCleanup() const
void * getCleanupBuffer()
void setNormalBlock(mlir::Block *bb)
void setIsEHCleanupKind()
void setIsNormalCleanupKind()
Information for lazily generating a cleanup.
A saved depth on the scope stack.
void popCleanup()
Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
iterator find(stable_iterator savePoint) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
bool empty() const
Determines whether the exception-scopes stack is empty.
bool requiresCatchOrCleanup() const
stable_iterator getInnermostActiveNormalCleanup() const
stable_iterator getInnermostEHScope() const
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
iterator begin() const
Returns an iterator pointing to the innermost EH scope.
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Represents a C++ temporary.
A (possibly-)qualified type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
U cast(CodeGen::Address addr)
static bool cleanupBranchThrough()
static bool ehCleanupActiveFlag()
static bool emitLifetimeMarkers()
static bool ehCleanupScopeRequiresEHCleanup()
static bool cleanupAppendInsts()
static bool simplifyCleanupEntry()
mlir::Block * getOrCreateCleanupBlock(mlir::OpBuilder &builder)