34class ConditionalEvaluationFinder
36 bool foundConditional =
false;
39 bool found()
const {
return foundConditional; }
41 bool VisitAbstractConditionalOperator(AbstractConditionalOperator *) {
42 foundConditional =
true;
47 bool TraverseLambdaExpr(
LambdaExpr *) {
return true; }
48 bool TraverseBlockExpr(BlockExpr *) {
return true; }
49 bool TraverseStmtExpr(StmtExpr *) {
return true; }
65 mlir::Location loc = builder.getUnknownLoc();
78 mlir::OpBuilder::InsertionGuard guard(builder);
80 builder.createFlagStore(loc,
false, active.
getPointer());
84 builder.createFlagStore(loc,
true, active.
getPointer());
95 assert(!
cleanup.hasActiveFlag() &&
"cleanup already has active flag?");
96 cleanup.setActiveFlag(activeFlag);
104 : cgf(cgf), cleanups(cgf), scope(
nullptr),
107 assert(subExpr &&
"ExprWithCleanups always has a sub-expression");
108 ConditionalEvaluationFinder finder;
109 finder.TraverseStmt(
const_cast<Expr *
>(subExpr));
110 if (finder.found()) {
111 mlir::Location loc = cgf.builder.getUnknownLoc();
112 cir::CleanupKind cleanupKind = cgf.getLangOpts().Exceptions
113 ? cir::CleanupKind::All
114 : cir::CleanupKind::Normal;
115 scope = cir::CleanupScopeOp::create(
116 cgf.builder, loc, cleanupKind,
118 [&](mlir::OpBuilder &
b, mlir::Location loc) {},
120 [&](mlir::OpBuilder &
b, mlir::Location loc) {});
121 cgf.builder.setInsertionPointToEnd(&scope.getBodyRegion().front());
127 assert(!exited &&
"FullExprCleanupScope::exit called twice");
130 size_t oldSize = deferredCleanupStackSize;
131 bool hasDeferredCleanups =
132 cgf.deferredConditionalCleanupStack.size() > oldSize;
135 cgf.deferredConditionalCleanupStack.truncate(oldSize);
136 cleanups.forceCleanup(valuesToReload);
142 for (mlir::Value *valPtr : valuesToReload) {
143 mlir::Value val = *valPtr;
148 Address temp = cgf.createDefaultAlignTempAlloca(val.getType(), val.getLoc(),
150 tempAllocas.push_back(temp);
151 cgf.builder.createStore(val.getLoc(), val, temp);
156 cleanups.forceCleanup();
160 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
161 mlir::Block &lastBodyBlock = scope.getBodyRegion().back();
162 cgf.builder.setInsertionPointToEnd(&lastBodyBlock);
163 if (lastBodyBlock.empty() ||
164 !lastBodyBlock.back().hasTrait<mlir::OpTrait::IsTerminator>())
165 cgf.builder.createYield(scope.getLoc());
170 mlir::OpBuilder::InsertionGuard guard(cgf.builder);
171 mlir::Block &cleanupBlock = scope.getCleanupRegion().front();
172 cgf.builder.setInsertionPointToEnd(&cleanupBlock);
174 if (hasDeferredCleanups) {
176 cgf.deferredConditionalCleanupStack.begin() + oldSize,
177 cgf.deferredConditionalCleanupStack.end()))) {
178 if (entry.activeFlag.isValid()) {
180 cgf.builder.createLoad(scope.getLoc(), entry.activeFlag);
182 cgf.builder, scope.getLoc(), flag,
false,
183 [&](mlir::OpBuilder &
b, mlir::Location loc) {
184 cgf.emitDestroy(entry.addr, entry.type, entry.destroyer);
185 cgf.builder.createYield(loc);
188 cgf.emitDestroy(entry.addr, entry.type, entry.destroyer);
192 cgf.builder.createYield(scope.getLoc());
195 cgf.deferredConditionalCleanupStack.truncate(oldSize);
196 cgf.builder.setInsertionPointAfter(scope);
199 for (
auto [addr, valPtr] : llvm::zip(tempAllocas, valuesToReload)) {
202 *valPtr = cgf.builder.createLoad(valPtr->getLoc(), addr);
// Out-of-line definition of the empty anchor method. This is the standard
// LLVM idiom for pinning a class's vtable/debug info to a single translation
// unit (presumably declared virtual in the header — the usual anchor pattern).
210void EHScopeStack::Cleanup::anchor() {}
214 stable_iterator si = getInnermostNormalCleanup();
215 stable_iterator se = stable_end();
220 si =
cleanup.getEnclosingNormalCleanup();
// Reserve `size` bytes on the EH scope stack; the (elided) tail of this
// function bumps startOfData down and returns the new pointer. The request
// is rounded up to ScopeStackAlignment, matching the rounding applied in
// deallocate(), so paired allocate/deallocate calls stay balanced. The
// buffer is consumed from its high end downward: startOfData moves toward
// startOfBuffer as scopes are pushed.
226char *EHScopeStack::allocate(
size_t size) {
227 size = llvm::alignTo(size, ScopeStackAlignment);
// First allocation: lazily create the buffer (power-of-two capacity, at
// least 1024 bytes) and start the data region at its very end — an empty
// stack has startOfData == endOfBuffer.
228 if (!startOfBuffer) {
229 unsigned capacity = llvm::PowerOf2Ceil(std::max<size_t>(size, 1024ul));
230 startOfBuffer = std::make_unique<char[]>(capacity);
231 startOfData = endOfBuffer = startOfBuffer.get() + capacity;
232 }
// Not enough free room below startOfData for this request: grow the buffer.
else if (
static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
233 unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
234 unsigned usedCapacity =
235 currentCapacity - (startOfData - startOfBuffer.get());
236 unsigned requiredCapacity = usedCapacity + size;
239 unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);
// Copy the live data to the tail of the new, larger buffer so the used
// region keeps its position relative to the buffer's end.
241 std::unique_ptr<char[]> newStartOfBuffer =
242 std::make_unique<char[]>(newCapacity);
243 char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
244 char *newStartOfData = newEndOfBuffer - usedCapacity;
245 memcpy(newStartOfData, startOfData, usedCapacity);
246 startOfBuffer.swap(newStartOfBuffer);
247 endOfBuffer = newEndOfBuffer;
248 startOfData = newStartOfData;
// Invariant after any growth: at least `size` free bytes remain between the
// buffer start and the live data, so the (elided) bump below cannot underflow.
251 assert(startOfBuffer.get() + size <= startOfData);
// Release `size` bytes from the top of the scope stack. The amount is
// rounded up to ScopeStackAlignment, mirroring the rounding in allocate(),
// so each pop exactly undoes its push. The buffer grows downward, so
// freeing advances startOfData upward (toward endOfBuffer).
256void EHScopeStack::deallocate(
size_t size) {
257 startOfData += llvm::alignTo(size, ScopeStackAlignment);
265 bool skipCleanupScope =
false;
267 cir::CleanupKind cleanupKind = cir::CleanupKind::All;
268 if (isEHCleanup && cgf->getLangOpts().Exceptions) {
270 isNormalCleanup ? cir::CleanupKind::All : cir::CleanupKind::EH;
276 cleanupKind = cir::CleanupKind::Normal;
278 skipCleanupScope =
true;
281 cir::CleanupScopeOp cleanupScope =
nullptr;
282 if (!skipCleanupScope) {
283 CIRGenBuilderTy &builder = cgf->getBuilder();
284 mlir::Location loc = builder.getUnknownLoc();
285 cleanupScope = cir::CleanupScopeOp::create(
286 builder, loc, cleanupKind,
288 [&](mlir::OpBuilder &
b, mlir::Location loc) {
292 [&](mlir::OpBuilder &
b, mlir::Location loc) {
296 builder.setInsertionPointToEnd(&cleanupScope.getBodyRegion().back());
303 if (innermostEHScope != stable_end() &&
307 EHCleanupScope *scope =
new (buffer)
308 EHCleanupScope(isNormalCleanup, isEHCleanup, size, cleanupScope,
309 innermostNormalCleanup, innermostEHScope);
312 innermostNormalCleanup = stable_begin();
315 innermostEHScope = stable_begin();
317 if (isLifetimeMarker)
318 cgf->cgm.errorNYI(
"push lifetime marker cleanup");
321 if (cgf->getLangOpts().EHAsynch && isEHCleanup && !isLifetimeMarker &&
322 cgf->getTarget().getCXXABI().isMicrosoft())
323 cgf->cgm.errorNYI(
"push seh cleanup");
329 assert(!empty() &&
"popping exception stack when not empty");
333 innermostNormalCleanup =
cleanup.getEnclosingNormalCleanup();
334 innermostEHScope =
cleanup.getEnclosingEHScope();
335 deallocate(
cleanup.getAllocatedSize());
337 cir::CleanupScopeOp cleanupScope =
cleanup.getCleanupScopeOp();
339 auto *block = &cleanupScope.getBodyRegion().back();
340 if (!block->mightHaveTerminator()) {
341 mlir::OpBuilder::InsertionGuard guard(cgf->getBuilder());
342 cgf->getBuilder().setInsertionPointToEnd(block);
343 cir::YieldOp::create(cgf->getBuilder(),
344 cgf->getBuilder().getUnknownLoc());
346 cgf->getBuilder().setInsertionPointAfter(cleanupScope);
354 for (stable_iterator si = getInnermostEHScope(); si != stable_end();) {
355 if (
auto *
cleanup = dyn_cast<EHCleanupScope>(&*find(si))) {
356 if (
cleanup->isLifetimeMarker()) {
371 mlir::Operation *dominatingIP) {
375 "cleanup block is neither normal nor EH?");
386 if (!var.isValid()) {
387 mlir::Location loc = builder.getUnknownLoc();
390 loc,
"cleanup.isactive");
393 assert(dominatingIP &&
"no existing variable and no dominating IP!");
396 mlir::Value val = builder.
getBool(
true, loc);
399 mlir::OpBuilder::InsertionGuard guard(builder);
400 builder.setInsertionPoint(dominatingIP);
408 mlir::Location loc = builder.getUnknownLoc();
414 mlir::Operation *dominatingIP) {
415 assert(
c !=
ehStack.stable_end() &&
"deactivating bottom of stack?");
417 assert(scope.
isActive() &&
"double deactivation");
434 EHScopeStack::Cleanup *
cleanup,
438 mlir::Block &block = cleanupScope.getCleanupRegion().back();
440 mlir::OpBuilder::InsertionGuard guard(builder);
441 builder.setInsertionPointToStart(&block);
447 mlir::Location loc = cleanupScope.getLoc();
449 cir::IfOp::create(builder, loc, isActive,
452 [&](mlir::OpBuilder &, mlir::Location) {
455 "cleanup ended with no insertion point?");
460 assert(cgf.
haveInsertPoint() &&
"cleanup ended with no insertion point?");
463 mlir::Block &cleanupRegionLastBlock = cleanupScope.getCleanupRegion().back();
464 if (cleanupRegionLastBlock.empty() ||
465 !cleanupRegionLastBlock.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
466 mlir::OpBuilder::InsertionGuard guardCase(builder);
467 builder.setInsertionPointToEnd(&cleanupRegionLastBlock);
478 .walk([&](mlir::Operation *op) {
480 return mlir::WalkResult::interrupt();
481 return mlir::WalkResult::advance();
491 assert(!
ehStack.empty() &&
"cleanup stack is empty!");
496 assert(cleanupScope &&
"CleanupScopeOp is nullptr");
515 if (forDeactivation && requiresNormalCleanup) {
522 mlir::Location loc = builder.getUnknownLoc();
529 mlir::OpBuilder::InsertionGuard guard(builder);
530 builder.setInsertionPoint(cleanupScope);
531 builder.createFlagStore(loc,
true, activeFlag.
getPointer());
535 assert(builder.getInsertionBlock() ==
536 &cleanupScope.getBodyRegion().back() &&
537 "expected insertion point in cleanup body");
538 builder.createFlagStore(loc,
false, activeFlag.
getPointer());
545 if (requiresEHCleanup)
546 cleanupScope.setCleanupKind(cir::CleanupKind::EH);
547 requiresNormalCleanup =
false;
559 if (!requiresNormalCleanup && !requiresEHCleanup) {
564 mlir::Block &cleanupBlock = cleanupScope.getCleanupRegion().back();
565 if (!cleanupBlock.mightHaveTerminator()) {
566 mlir::OpBuilder::InsertionGuard guard(builder);
567 builder.setInsertionPointToEnd(&cleanupBlock);
568 cir::YieldOp::create(builder, builder.getUnknownLoc());
581 cleanupBufferStack[8 *
sizeof(
void *)];
582 std::unique_ptr<char[]> cleanupBufferHeap;
588 if (cleanupSize <=
sizeof(cleanupBufferStack)) {
589 memcpy(cleanupBufferStack, cleanupSource, cleanupSize);
592 cleanupBufferHeap.reset(
new char[cleanupSize]);
593 memcpy(cleanupBufferHeap.get(), cleanupSource, cleanupSize);
605 Address cleanupActiveFlag = normalActiveFlag.
isValid() ? normalActiveFlag
606 : ehActiveFlag.
isValid() ? ehActiveFlag
624 bool requiresCleanup =
false;
625 for (
auto it =
ehStack.begin(), ie =
ehStack.find(oldCleanupStackDepth);
628 requiresCleanup =
true;
637 if (requiresCleanup) {
638 for (mlir::Value *valPtr : valuesToReload) {
639 mlir::Value val = *valPtr;
647 tempAllocas.push_back(temp);
648 builder.createStore(val.getLoc(), val, temp);
654 while (
ehStack.stable_begin() != oldCleanupStackDepth)
658 if (requiresCleanup) {
659 for (
auto [addr, valPtr] : llvm::zip(tempAllocas, valuesToReload)) {
660 mlir::Location loc = valPtr->getLoc();
661 *valPtr = builder.createLoad(loc, addr);
static void setupCleanupBlockDeactivation(CIRGenFunction &cgf, EHScopeStack::stable_iterator c, mlir::Operation *dominatingIP)
The given cleanup block is being deactivated.
static bool bodyHasBranchThroughExits(mlir::Region &bodyRegion)
Check whether a cleanup scope body contains any non-yield exits that branch through the cleanup.
static void emitCleanup(CIRGenFunction &cgf, cir::CleanupScopeOp cleanupScope, EHScopeStack::Cleanup *cleanup, EHScopeStack::Cleanup::Flags flags, Address activeFlag)
static Decl::Kind getKind(const Decl *D)
A collection of selectors, each with an associated kind, and an ordered collection of selectors. A selector has a kind.
__DEVICE__ void * memcpy(void *__a, const void *__b, size_t __c)
__device__ __2f16 float c
cir::ConstantOp getBool(bool state, mlir::Location loc)
cir::StoreOp createFlagStore(mlir::Location loc, bool val, mlir::Value dst)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
cir::LoadOp createFlagLoad(mlir::Location loc, mlir::Value addr)
Emit a load from a boolean flag variable.
cir::BoolType getBoolTy()
mlir::Value getPointer() const
FullExprCleanupScope(CIRGenFunction &cgf, const Expr *subExpr)
void exit(ArrayRef< mlir::Value * > valuesToReload={})
llvm::SmallVector< PendingCleanupEntry > lifetimeExtendedCleanupStack
void initFullExprCleanup()
Set up the last cleanup that was pushed as a conditional full-expression cleanup.
mlir::Block * getCurFunctionEntryBlock()
bool isInConditionalBranch() const
void setBeforeOutermostConditional(mlir::Value value, Address addr)
ConditionalEvaluation * outermostConditional
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
llvm::SmallVector< PendingCleanupEntry > deferredConditionalCleanupStack
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
static Destroyer destroyCXXObject
void initFullExprCleanupWithFlag(Address activeFlag)
Address createCleanupActiveFlag()
Create an active flag variable for use with conditional cleanups.
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
bool haveInsertPoint() const
True if an insertion point is defined.
void emitCXXTemporary(const CXXTemporary *temporary, QualType tempType, Address ptr)
Emits all the code to cause the given temporary to be cleaned up.
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
CIRGenBuilderTy & getBuilder()
void pushPendingCleanupToEHStack(const PendingCleanupEntry &entry)
Promote a single pending cleanup entry onto the EH scope stack.
void popCleanupBlock(bool forDeactivation=false)
Pop a cleanup block from the stack.
EHScopeStack::stable_iterator currentCleanupStackDepth
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates an alloca and inserts it into the entry block of the current region.
Address createDefaultAlignTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name)
CreateDefaultAlignTempAlloca - This creates an alloca with the default alignment of the corresponding...
A cleanup scope which generates the cleanup blocks lazily.
void setTestFlagInEHCleanup(bool value)
void setTestFlagInNormalCleanup(bool value)
Address getActiveFlag() const
size_t getCleanupSize() const
cir::CleanupScopeOp getCleanupScopeOp()
bool shouldTestFlagInEHCleanup() const
static size_t getSizeForCleanupSize(size_t size)
Gets the size required for a lazy cleanup scope with the given cleanup-data requirements.
bool isNormalCleanup() const
void setActiveFlag(Address var)
void * getCleanupBuffer()
bool shouldTestFlagInNormalCleanup() const
void setActive(bool isActive)
void setIsEHCleanupKind()
void setIsNormalCleanupKind()
Information for lazily generating a cleanup.
A saved depth on the scope stack.
void popCleanup()
Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
iterator find(stable_iterator savePoint) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
bool requiresCatchOrCleanup() const
stable_iterator getInnermostActiveNormalCleanup() const
Represents a C++ temporary.
static CharUnits One()
One - Construct a CharUnits quantity of one.
This represents one expression.
A (possibly-)qualified type.
A class that does preorder or postorder depth-first traversal on the entire Clang AST and visits each...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
U cast(CodeGen::Address addr)
static bool emitLifetimeMarkers()
A cleanup entry that will be promoted onto the EH scope stack at a later point.