clang 22.0.0git
CIRGenStmt.cpp
Go to the documentation of this file.
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Emit Stmt nodes as CIR code.
//
//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
14#include "CIRGenFunction.h"
15
16#include "mlir/IR/Builders.h"
17#include "mlir/IR/Location.h"
18#include "mlir/Support/LLVM.h"
19#include "clang/AST/ExprCXX.h"
20#include "clang/AST/Stmt.h"
23
24using namespace clang;
25using namespace clang::CIRGen;
26using namespace cir;
27
28static mlir::LogicalResult emitStmtWithResult(CIRGenFunction &cgf,
29 const Stmt *exprResult,
30 AggValueSlot slot,
31 Address *lastValue) {
32 // We have to special case labels here. They are statements, but when put
33 // at the end of a statement expression, they yield the value of their
34 // subexpression. Handle this by walking through all labels we encounter,
35 // emitting them before we evaluate the subexpr.
36 // Similar issues arise for attributed statements.
37 while (!isa<Expr>(exprResult)) {
38 if (const auto *ls = dyn_cast<LabelStmt>(exprResult)) {
39 if (cgf.emitLabel(*ls->getDecl()).failed())
40 return mlir::failure();
41 exprResult = ls->getSubStmt();
42 } else if (const auto *as = dyn_cast<AttributedStmt>(exprResult)) {
43 // FIXME: Update this if we ever have attributes that affect the
44 // semantics of an expression.
45 exprResult = as->getSubStmt();
46 } else {
47 llvm_unreachable("Unknown value statement");
48 }
49 }
50
51 const Expr *e = cast<Expr>(exprResult);
52 QualType exprTy = e->getType();
53 if (cgf.hasAggregateEvaluationKind(exprTy)) {
54 cgf.emitAggExpr(e, slot);
55 } else {
56 // We can't return an RValue here because there might be cleanups at
57 // the end of the StmtExpr. Because of that, we have to emit the result
58 // here into a temporary alloca.
59 cgf.emitAnyExprToMem(e, *lastValue, Qualifiers(),
60 /*IsInit*/ false);
61 }
62
63 return mlir::success();
64}
65
67 const CompoundStmt &s, Address *lastValue, AggValueSlot slot) {
68 mlir::LogicalResult result = mlir::success();
69 const Stmt *exprResult = s.getStmtExprResult();
70 assert((!lastValue || (lastValue && exprResult)) &&
71 "If lastValue is not null then the CompoundStmt must have a "
72 "StmtExprResult");
73
74 for (const Stmt *curStmt : s.body()) {
75 const bool saveResult = lastValue && exprResult == curStmt;
76 if (saveResult) {
77 if (emitStmtWithResult(*this, exprResult, slot, lastValue).failed())
78 result = mlir::failure();
79 } else {
80 if (emitStmt(curStmt, /*useCurrentScope=*/false).failed())
81 result = mlir::failure();
82 }
83 }
84 return result;
85}
86
88 Address *lastValue,
89 AggValueSlot slot) {
90 // Add local scope to track new declared variables.
92 mlir::Location scopeLoc = getLoc(s.getSourceRange());
93 mlir::OpBuilder::InsertPoint scopeInsPt;
94 cir::ScopeOp::create(
95 builder, scopeLoc,
96 [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
97 scopeInsPt = b.saveInsertionPoint();
98 });
99 mlir::OpBuilder::InsertionGuard guard(builder);
100 builder.restoreInsertionPoint(scopeInsPt);
101 LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
102 return emitCompoundStmtWithoutScope(s, lastValue, slot);
103}
104
108
109// Build CIR for a statement. useCurrentScope should be true if no new scopes
110// need to be created when finding a compound statement.
111mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
112 bool useCurrentScope,
114 if (mlir::succeeded(emitSimpleStmt(s, useCurrentScope)))
115 return mlir::success();
116
117 switch (s->getStmtClass()) {
119 case Stmt::CXXCatchStmtClass:
120 case Stmt::SEHExceptStmtClass:
121 case Stmt::SEHFinallyStmtClass:
122 case Stmt::MSDependentExistsStmtClass:
123 llvm_unreachable("invalid statement class to emit generically");
124 case Stmt::BreakStmtClass:
125 case Stmt::NullStmtClass:
126 case Stmt::CompoundStmtClass:
127 case Stmt::ContinueStmtClass:
128 case Stmt::DeclStmtClass:
129 case Stmt::ReturnStmtClass:
130 llvm_unreachable("should have emitted these statements as simple");
131
132#define STMT(Type, Base)
133#define ABSTRACT_STMT(Op)
134#define EXPR(Type, Base) case Stmt::Type##Class:
135#include "clang/AST/StmtNodes.inc"
136 {
137 assert(builder.getInsertionBlock() &&
138 "expression emission must have an insertion point");
139
141
142 // Classic codegen has a check here to see if the emitter created a new
143 // block that isn't used (comparing the incoming and outgoing insertion
144 // points) and deletes the outgoing block if it's not used. In CIR, we
145 // will handle that during the cir.canonicalize pass.
146 return mlir::success();
147 }
148 case Stmt::IfStmtClass:
149 return emitIfStmt(cast<IfStmt>(*s));
150 case Stmt::SwitchStmtClass:
152 case Stmt::ForStmtClass:
153 return emitForStmt(cast<ForStmt>(*s));
154 case Stmt::WhileStmtClass:
156 case Stmt::DoStmtClass:
157 return emitDoStmt(cast<DoStmt>(*s));
158 case Stmt::CXXTryStmtClass:
160 case Stmt::CXXForRangeStmtClass:
162 case Stmt::OpenACCComputeConstructClass:
164 case Stmt::OpenACCLoopConstructClass:
166 case Stmt::OpenACCCombinedConstructClass:
168 case Stmt::OpenACCDataConstructClass:
170 case Stmt::OpenACCEnterDataConstructClass:
172 case Stmt::OpenACCExitDataConstructClass:
174 case Stmt::OpenACCHostDataConstructClass:
176 case Stmt::OpenACCWaitConstructClass:
178 case Stmt::OpenACCInitConstructClass:
180 case Stmt::OpenACCShutdownConstructClass:
182 case Stmt::OpenACCSetConstructClass:
184 case Stmt::OpenACCUpdateConstructClass:
186 case Stmt::OpenACCCacheConstructClass:
188 case Stmt::OpenACCAtomicConstructClass:
190 case Stmt::GCCAsmStmtClass:
191 case Stmt::MSAsmStmtClass:
192 return emitAsmStmt(cast<AsmStmt>(*s));
193 case Stmt::OMPScopeDirectiveClass:
194 case Stmt::OMPErrorDirectiveClass:
195 case Stmt::LabelStmtClass:
196 case Stmt::AttributedStmtClass:
197 case Stmt::GotoStmtClass:
198 case Stmt::DefaultStmtClass:
199 case Stmt::CaseStmtClass:
200 case Stmt::SEHLeaveStmtClass:
201 case Stmt::SYCLKernelCallStmtClass:
202 case Stmt::CoroutineBodyStmtClass:
204 case Stmt::CoreturnStmtClass:
205 case Stmt::IndirectGotoStmtClass:
206 case Stmt::OMPParallelDirectiveClass:
207 case Stmt::OMPTaskwaitDirectiveClass:
208 case Stmt::OMPTaskyieldDirectiveClass:
209 case Stmt::OMPBarrierDirectiveClass:
210 case Stmt::CapturedStmtClass:
211 case Stmt::ObjCAtTryStmtClass:
212 case Stmt::ObjCAtThrowStmtClass:
213 case Stmt::ObjCAtSynchronizedStmtClass:
214 case Stmt::ObjCForCollectionStmtClass:
215 case Stmt::ObjCAutoreleasePoolStmtClass:
216 case Stmt::SEHTryStmtClass:
217 case Stmt::OMPMetaDirectiveClass:
218 case Stmt::OMPCanonicalLoopClass:
219 case Stmt::OMPSimdDirectiveClass:
220 case Stmt::OMPTileDirectiveClass:
221 case Stmt::OMPUnrollDirectiveClass:
222 case Stmt::OMPFuseDirectiveClass:
223 case Stmt::OMPForDirectiveClass:
224 case Stmt::OMPForSimdDirectiveClass:
225 case Stmt::OMPSectionsDirectiveClass:
226 case Stmt::OMPSectionDirectiveClass:
227 case Stmt::OMPSingleDirectiveClass:
228 case Stmt::OMPMasterDirectiveClass:
229 case Stmt::OMPCriticalDirectiveClass:
230 case Stmt::OMPParallelForDirectiveClass:
231 case Stmt::OMPParallelForSimdDirectiveClass:
232 case Stmt::OMPParallelMasterDirectiveClass:
233 case Stmt::OMPParallelSectionsDirectiveClass:
234 case Stmt::OMPTaskDirectiveClass:
235 case Stmt::OMPTaskgroupDirectiveClass:
236 case Stmt::OMPFlushDirectiveClass:
237 case Stmt::OMPDepobjDirectiveClass:
238 case Stmt::OMPScanDirectiveClass:
239 case Stmt::OMPOrderedDirectiveClass:
240 case Stmt::OMPAtomicDirectiveClass:
241 case Stmt::OMPTargetDirectiveClass:
242 case Stmt::OMPTeamsDirectiveClass:
243 case Stmt::OMPCancellationPointDirectiveClass:
244 case Stmt::OMPCancelDirectiveClass:
245 case Stmt::OMPTargetDataDirectiveClass:
246 case Stmt::OMPTargetEnterDataDirectiveClass:
247 case Stmt::OMPTargetExitDataDirectiveClass:
248 case Stmt::OMPTargetParallelDirectiveClass:
249 case Stmt::OMPTargetParallelForDirectiveClass:
250 case Stmt::OMPTaskLoopDirectiveClass:
251 case Stmt::OMPTaskLoopSimdDirectiveClass:
252 case Stmt::OMPMaskedTaskLoopDirectiveClass:
253 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
254 case Stmt::OMPMasterTaskLoopDirectiveClass:
255 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
256 case Stmt::OMPParallelGenericLoopDirectiveClass:
257 case Stmt::OMPParallelMaskedDirectiveClass:
258 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
259 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
260 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
261 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
262 case Stmt::OMPDistributeDirectiveClass:
263 case Stmt::OMPDistributeParallelForDirectiveClass:
264 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
265 case Stmt::OMPDistributeSimdDirectiveClass:
266 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
267 case Stmt::OMPTargetParallelForSimdDirectiveClass:
268 case Stmt::OMPTargetSimdDirectiveClass:
269 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
270 case Stmt::OMPTargetUpdateDirectiveClass:
271 case Stmt::OMPTeamsDistributeDirectiveClass:
272 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
273 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
274 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
275 case Stmt::OMPTeamsGenericLoopDirectiveClass:
276 case Stmt::OMPTargetTeamsDirectiveClass:
277 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
278 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
279 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
280 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
281 case Stmt::OMPInteropDirectiveClass:
282 case Stmt::OMPDispatchDirectiveClass:
283 case Stmt::OMPGenericLoopDirectiveClass:
284 case Stmt::OMPReverseDirectiveClass:
285 case Stmt::OMPInterchangeDirectiveClass:
286 case Stmt::OMPAssumeDirectiveClass:
287 case Stmt::OMPMaskedDirectiveClass:
288 case Stmt::OMPStripeDirectiveClass:
289 case Stmt::ObjCAtCatchStmtClass:
290 case Stmt::ObjCAtFinallyStmtClass:
291 cgm.errorNYI(s->getSourceRange(),
292 std::string("emitStmt: ") + s->getStmtClassName());
293 return mlir::failure();
294 }
295
296 llvm_unreachable("Unexpected statement class");
297}
298
299mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *s,
300 bool useCurrentScope) {
301 switch (s->getStmtClass()) {
302 default:
303 return mlir::failure();
304 case Stmt::DeclStmtClass:
305 return emitDeclStmt(cast<DeclStmt>(*s));
306 case Stmt::CompoundStmtClass:
307 if (useCurrentScope)
310 case Stmt::GotoStmtClass:
311 return emitGotoStmt(cast<GotoStmt>(*s));
312 case Stmt::ContinueStmtClass:
314
315 // NullStmt doesn't need any handling, but we need to say we handled it.
316 case Stmt::NullStmtClass:
317 break;
318
319 case Stmt::LabelStmtClass:
321 case Stmt::CaseStmtClass:
322 case Stmt::DefaultStmtClass:
323 // If we reached here, we must not handling a switch case in the top level.
325 /*buildingTopLevelCase=*/false);
326 break;
327
328 case Stmt::BreakStmtClass:
330 case Stmt::ReturnStmtClass:
332 }
333
334 return mlir::success();
335}
336
338
339 if (emitLabel(*s.getDecl()).failed())
340 return mlir::failure();
341
342 if (getContext().getLangOpts().EHAsynch && s.isSideEntry())
343 getCIRGenModule().errorNYI(s.getSourceRange(), "IsEHa: not implemented.");
344
345 return emitStmt(s.getSubStmt(), /*useCurrentScope*/ true);
346}
347
348// Add a terminating yield on a body region if no other terminators are used.
349static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r,
350 mlir::Location loc) {
351 if (r.empty())
352 return;
353
355 unsigned numBlocks = r.getBlocks().size();
356 for (auto &block : r.getBlocks()) {
357 // Already cleanup after return operations, which might create
358 // empty blocks if emitted as last stmt.
359 if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() &&
360 block.hasNoSuccessors())
361 eraseBlocks.push_back(&block);
362
363 if (block.empty() ||
364 !block.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
365 mlir::OpBuilder::InsertionGuard guardCase(builder);
366 builder.setInsertionPointToEnd(&block);
367 builder.createYield(loc);
368 }
369 }
370
371 for (auto *b : eraseBlocks)
372 b->erase();
373}
374
375mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &s) {
376 mlir::LogicalResult res = mlir::success();
377 // The else branch of a consteval if statement is always the only branch
378 // that can be runtime evaluated.
379 const Stmt *constevalExecuted;
380 if (s.isConsteval()) {
381 constevalExecuted = s.isNegatedConsteval() ? s.getThen() : s.getElse();
382 if (!constevalExecuted) {
383 // No runtime code execution required
384 return res;
385 }
386 }
387
388 // C99 6.8.4.1: The first substatement is executed if the expression
389 // compares unequal to 0. The condition must be a scalar type.
390 auto ifStmtBuilder = [&]() -> mlir::LogicalResult {
391 if (s.isConsteval())
392 return emitStmt(constevalExecuted, /*useCurrentScope=*/true);
393
394 if (s.getInit())
395 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
396 return mlir::failure();
397
398 if (s.getConditionVariable())
399 emitDecl(*s.getConditionVariable());
400
401 // If the condition folds to a constant and this is an 'if constexpr',
402 // we simplify it early in CIRGen to avoid emitting the full 'if'.
403 bool condConstant;
404 if (constantFoldsToBool(s.getCond(), condConstant, s.isConstexpr())) {
405 if (s.isConstexpr()) {
406 // Handle "if constexpr" explicitly here to avoid generating some
407 // ill-formed code since in CIR the "if" is no longer simplified
408 // in this lambda like in Clang but postponed to other MLIR
409 // passes.
410 if (const Stmt *executed = condConstant ? s.getThen() : s.getElse())
411 return emitStmt(executed, /*useCurrentScope=*/true);
412 // There is nothing to execute at runtime.
413 // TODO(cir): there is still an empty cir.scope generated by the caller.
414 return mlir::success();
415 }
416 }
417
420 return emitIfOnBoolExpr(s.getCond(), s.getThen(), s.getElse());
421 };
422
423 // TODO: Add a new scoped symbol table.
424 // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
425 // The if scope contains the full source range for IfStmt.
426 mlir::Location scopeLoc = getLoc(s.getSourceRange());
427 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
428 [&](mlir::OpBuilder &b, mlir::Location loc) {
429 LexicalScope lexScope{*this, scopeLoc,
430 builder.getInsertionBlock()};
431 res = ifStmtBuilder();
432 });
433
434 return res;
435}
436
437mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &s) {
438 assert(builder.getInsertionBlock() && "expected valid insertion point");
439
440 for (const Decl *i : s.decls())
441 emitDecl(*i, /*evaluateConditionDecl=*/true);
442
443 return mlir::success();
444}
445
446mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
447 mlir::Location loc = getLoc(s.getSourceRange());
448 const Expr *rv = s.getRetValue();
449
450 RunCleanupsScope cleanupScope(*this);
451 bool createNewScope = false;
452 if (const auto *ewc = dyn_cast_or_null<ExprWithCleanups>(rv)) {
453 rv = ewc->getSubExpr();
454 createNewScope = true;
455 }
456
457 auto handleReturnVal = [&]() {
458 if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
459 s.getNRVOCandidate()->isNRVOVariable()) {
462 } else if (!rv) {
463 // No return expression. Do nothing.
464 } else if (rv->getType()->isVoidType()) {
465 // Make sure not to return anything, but evaluate the expression
466 // for side effects.
467 if (rv) {
468 emitAnyExpr(rv);
469 }
470 } else if (cast<FunctionDecl>(curGD.getDecl())
471 ->getReturnType()
472 ->isReferenceType()) {
473 // If this function returns a reference, take the address of the
474 // expression rather than the value.
476 builder.CIRBaseBuilderTy::createStore(loc, result.getValue(),
477 *fnRetAlloca);
478 } else {
479 mlir::Value value = nullptr;
481 case cir::TEK_Scalar:
482 value = emitScalarExpr(rv);
483 if (value) { // Change this to an assert once emitScalarExpr is complete
484 builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
485 }
486 break;
487 case cir::TEK_Complex:
490 /*isInit=*/true);
491 break;
498 break;
499 }
500 }
501 };
502
503 if (!createNewScope) {
504 handleReturnVal();
505 } else {
506 mlir::Location scopeLoc =
507 getLoc(rv ? rv->getSourceRange() : s.getSourceRange());
508 // First create cir.scope and later emit it's body. Otherwise all CIRGen
509 // dispatched by `handleReturnVal()` might needs to manipulate blocks and
510 // look into parents, which are all unlinked.
511 mlir::OpBuilder::InsertPoint scopeBody;
512 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
513 [&](mlir::OpBuilder &b, mlir::Location loc) {
514 scopeBody = b.saveInsertionPoint();
515 });
516 {
517 mlir::OpBuilder::InsertionGuard guard(builder);
518 builder.restoreInsertionPoint(scopeBody);
519 CIRGenFunction::LexicalScope lexScope{*this, scopeLoc,
520 builder.getInsertionBlock()};
521 handleReturnVal();
522 }
523 }
524
525 cleanupScope.forceCleanup();
526
527 // In CIR we might have returns in different scopes.
528 // FIXME(cir): cleanup code is handling actual return emission, the logic
529 // should try to match traditional codegen more closely (to the extent which
530 // is possible).
531 auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
533
534 // Insert the new block to continue codegen after branch to ret block.
535 builder.createBlock(builder.getBlock()->getParent());
536
537 return mlir::success();
538}
539
540mlir::LogicalResult CIRGenFunction::emitGotoStmt(const clang::GotoStmt &s) {
541 // FIXME: LLVM codegen inserts emit a stop point here for debug info
542 // sake when the insertion point is available, but doesn't do
543 // anything special when there isn't. We haven't implemented debug
544 // info support just yet, look at this again once we have it.
546
547 cir::GotoOp::create(builder, getLoc(s.getSourceRange()),
548 s.getLabel()->getName());
549
550 // A goto marks the end of a block, create a new one for codegen after
551 // emitGotoStmt can resume building in that block.
552 // Insert the new block to continue codegen after goto.
553 builder.createBlock(builder.getBlock()->getParent());
554
555 return mlir::success();
556}
557
558mlir::LogicalResult
560 builder.createContinue(getLoc(s.getKwLoc()));
561
562 // Insert the new block to continue codegen after the continue statement.
563 builder.createBlock(builder.getBlock()->getParent());
564
565 return mlir::success();
566}
567
568mlir::LogicalResult CIRGenFunction::emitLabel(const clang::LabelDecl &d) {
569 // Create a new block to tag with a label and add a branch from
570 // the current one to it. If the block is empty just call attach it
571 // to this label.
572 mlir::Block *currBlock = builder.getBlock();
573 mlir::Block *labelBlock = currBlock;
574
575 if (!currBlock->empty() || currBlock->isEntryBlock()) {
576 {
577 mlir::OpBuilder::InsertionGuard guard(builder);
578 labelBlock = builder.createBlock(builder.getBlock()->getParent());
579 }
580 cir::BrOp::create(builder, getLoc(d.getSourceRange()), labelBlock);
581 }
582
583 builder.setInsertionPointToEnd(labelBlock);
584 cir::LabelOp::create(builder, getLoc(d.getSourceRange()), d.getName());
585 builder.setInsertionPointToEnd(labelBlock);
586
587 // FIXME: emit debug info for labels, incrementProfileCounter
591 return mlir::success();
592}
593
595 builder.createBreak(getLoc(s.getKwLoc()));
596
597 // Insert the new block to continue codegen after the break statement.
598 builder.createBlock(builder.getBlock()->getParent());
599
600 return mlir::success();
601}
602
603template <typename T>
604mlir::LogicalResult
606 mlir::ArrayAttr value, CaseOpKind kind,
607 bool buildingTopLevelCase) {
608
610 "only case or default stmt go here");
611
612 mlir::LogicalResult result = mlir::success();
613
614 mlir::Location loc = getLoc(stmt->getBeginLoc());
615
616 enum class SubStmtKind { Case, Default, Other };
617 SubStmtKind subStmtKind = SubStmtKind::Other;
618 const Stmt *sub = stmt->getSubStmt();
619
620 mlir::OpBuilder::InsertPoint insertPoint;
621 CaseOp::create(builder, loc, value, kind, insertPoint);
622
623 {
624 mlir::OpBuilder::InsertionGuard guardSwitch(builder);
625 builder.restoreInsertionPoint(insertPoint);
626
627 if (isa<DefaultStmt>(sub) && isa<CaseStmt>(stmt)) {
628 subStmtKind = SubStmtKind::Default;
629 builder.createYield(loc);
630 } else if (isa<CaseStmt>(sub) && isa<DefaultStmt, CaseStmt>(stmt)) {
631 subStmtKind = SubStmtKind::Case;
632 builder.createYield(loc);
633 } else {
634 result = emitStmt(sub, /*useCurrentScope=*/!isa<CompoundStmt>(sub));
635 }
636
637 insertPoint = builder.saveInsertionPoint();
638 }
639
640 // If the substmt is default stmt or case stmt, try to handle the special case
641 // to make it into the simple form. e.g.
642 //
643 // swtich () {
644 // case 1:
645 // default:
646 // ...
647 // }
648 //
649 // we prefer generating
650 //
651 // cir.switch() {
652 // cir.case(equal, 1) {
653 // cir.yield
654 // }
655 // cir.case(default) {
656 // ...
657 // }
658 // }
659 //
660 // than
661 //
662 // cir.switch() {
663 // cir.case(equal, 1) {
664 // cir.case(default) {
665 // ...
666 // }
667 // }
668 // }
669 //
670 // We don't need to revert this if we find the current switch can't be in
671 // simple form later since the conversion itself should be harmless.
672 if (subStmtKind == SubStmtKind::Case) {
673 result = emitCaseStmt(*cast<CaseStmt>(sub), condType, buildingTopLevelCase);
674 } else if (subStmtKind == SubStmtKind::Default) {
675 result = emitDefaultStmt(*cast<DefaultStmt>(sub), condType,
676 buildingTopLevelCase);
677 } else if (buildingTopLevelCase) {
678 // If we're building a top level case, try to restore the insert point to
679 // the case we're building, then we can attach more random stmts to the
680 // case to make generating `cir.switch` operation to be a simple form.
681 builder.restoreInsertionPoint(insertPoint);
682 }
683
684 return result;
685}
686
687mlir::LogicalResult CIRGenFunction::emitCaseStmt(const CaseStmt &s,
688 mlir::Type condType,
689 bool buildingTopLevelCase) {
690 cir::CaseOpKind kind;
691 mlir::ArrayAttr value;
692 llvm::APSInt intVal = s.getLHS()->EvaluateKnownConstInt(getContext());
693
694 // If the case statement has an RHS value, it is representing a GNU
695 // case range statement, where LHS is the beginning of the range
696 // and RHS is the end of the range.
697 if (const Expr *rhs = s.getRHS()) {
698 llvm::APSInt endVal = rhs->EvaluateKnownConstInt(getContext());
699 value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal),
700 cir::IntAttr::get(condType, endVal)});
701 kind = cir::CaseOpKind::Range;
702 } else {
703 value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal)});
704 kind = cir::CaseOpKind::Equal;
705 }
706
707 return emitCaseDefaultCascade(&s, condType, value, kind,
708 buildingTopLevelCase);
709}
710
712 mlir::Type condType,
713 bool buildingTopLevelCase) {
714 return emitCaseDefaultCascade(&s, condType, builder.getArrayAttr({}),
715 cir::CaseOpKind::Default, buildingTopLevelCase);
716}
717
718mlir::LogicalResult CIRGenFunction::emitSwitchCase(const SwitchCase &s,
719 bool buildingTopLevelCase) {
720 assert(!condTypeStack.empty() &&
721 "build switch case without specifying the type of the condition");
722
723 if (s.getStmtClass() == Stmt::CaseStmtClass)
725 buildingTopLevelCase);
726
727 if (s.getStmtClass() == Stmt::DefaultStmtClass)
729 buildingTopLevelCase);
730
731 llvm_unreachable("expect case or default stmt");
732}
733
734mlir::LogicalResult
736 ArrayRef<const Attr *> forAttrs) {
737 cir::ForOp forOp;
738
739 // TODO(cir): pass in array of attributes.
740 auto forStmtBuilder = [&]() -> mlir::LogicalResult {
741 mlir::LogicalResult loopRes = mlir::success();
742 // Evaluate the first pieces before the loop.
743 if (s.getInit())
744 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
745 return mlir::failure();
746 if (emitStmt(s.getRangeStmt(), /*useCurrentScope=*/true).failed())
747 return mlir::failure();
748 if (emitStmt(s.getBeginStmt(), /*useCurrentScope=*/true).failed())
749 return mlir::failure();
750 if (emitStmt(s.getEndStmt(), /*useCurrentScope=*/true).failed())
751 return mlir::failure();
752
754 // From LLVM: if there are any cleanups between here and the loop-exit
755 // scope, create a block to stage a loop exit along.
756 // We probably already do the right thing because of ScopeOp, but make
757 // sure we handle all cases.
759
760 forOp = builder.createFor(
761 getLoc(s.getSourceRange()),
762 /*condBuilder=*/
763 [&](mlir::OpBuilder &b, mlir::Location loc) {
764 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
765 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
766 mlir::Value condVal = evaluateExprAsBool(s.getCond());
767 builder.createCondition(condVal);
768 },
769 /*bodyBuilder=*/
770 [&](mlir::OpBuilder &b, mlir::Location loc) {
771 // https://en.cppreference.com/w/cpp/language/for
772 // In C++ the scope of the init-statement and the scope of
773 // statement are one and the same.
774 bool useCurrentScope = true;
775 if (emitStmt(s.getLoopVarStmt(), useCurrentScope).failed())
776 loopRes = mlir::failure();
777 if (emitStmt(s.getBody(), useCurrentScope).failed())
778 loopRes = mlir::failure();
779 emitStopPoint(&s);
780 },
781 /*stepBuilder=*/
782 [&](mlir::OpBuilder &b, mlir::Location loc) {
783 if (s.getInc())
784 if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
785 loopRes = mlir::failure();
786 builder.createYield(loc);
787 });
788 return loopRes;
789 };
790
791 mlir::LogicalResult res = mlir::success();
792 mlir::Location scopeLoc = getLoc(s.getSourceRange());
793 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
794 [&](mlir::OpBuilder &b, mlir::Location loc) {
795 // Create a cleanup scope for the condition
796 // variable cleanups. Logical equivalent from
797 // LLVM codegn for LexicalScope
798 // ConditionScope(*this, S.getSourceRange())...
799 LexicalScope lexScope{*this, loc,
800 builder.getInsertionBlock()};
801 res = forStmtBuilder();
802 });
803
804 if (res.failed())
805 return res;
806
807 terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
808 return mlir::success();
809}
810
811mlir::LogicalResult CIRGenFunction::emitForStmt(const ForStmt &s) {
812 cir::ForOp forOp;
813
814 // TODO: pass in an array of attributes.
815 auto forStmtBuilder = [&]() -> mlir::LogicalResult {
816 mlir::LogicalResult loopRes = mlir::success();
817 // Evaluate the first part before the loop.
818 if (s.getInit())
819 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
820 return mlir::failure();
822 // In the classic codegen, if there are any cleanups between here and the
823 // loop-exit scope, a block is created to stage the loop exit. We probably
824 // already do the right thing because of ScopeOp, but we need more testing
825 // to be sure we handle all cases.
827
828 forOp = builder.createFor(
829 getLoc(s.getSourceRange()),
830 /*condBuilder=*/
831 [&](mlir::OpBuilder &b, mlir::Location loc) {
832 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
833 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
834 mlir::Value condVal;
835 if (s.getCond()) {
836 // If the for statement has a condition scope,
837 // emit the local variable declaration.
838 if (s.getConditionVariable())
839 emitDecl(*s.getConditionVariable());
840 // C99 6.8.5p2/p4: The first substatement is executed if the
841 // expression compares unequal to 0. The condition must be a
842 // scalar type.
843 condVal = evaluateExprAsBool(s.getCond());
844 } else {
845 condVal = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
846 }
847 builder.createCondition(condVal);
848 },
849 /*bodyBuilder=*/
850 [&](mlir::OpBuilder &b, mlir::Location loc) {
851 // The scope of the for loop body is nested within the scope of the
852 // for loop's init-statement and condition.
853 if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
854 loopRes = mlir::failure();
856 },
857 /*stepBuilder=*/
858 [&](mlir::OpBuilder &b, mlir::Location loc) {
859 if (s.getInc())
860 if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
861 loopRes = mlir::failure();
862 builder.createYield(loc);
863 });
864 return loopRes;
865 };
866
867 auto res = mlir::success();
868 auto scopeLoc = getLoc(s.getSourceRange());
869 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
870 [&](mlir::OpBuilder &b, mlir::Location loc) {
871 LexicalScope lexScope{*this, loc,
872 builder.getInsertionBlock()};
873 res = forStmtBuilder();
874 });
875
876 if (res.failed())
877 return res;
878
879 terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
880 return mlir::success();
881}
882
883mlir::LogicalResult CIRGenFunction::emitDoStmt(const DoStmt &s) {
884 cir::DoWhileOp doWhileOp;
885
886 // TODO: pass in array of attributes.
887 auto doStmtBuilder = [&]() -> mlir::LogicalResult {
888 mlir::LogicalResult loopRes = mlir::success();
890 // From LLVM: if there are any cleanups between here and the loop-exit
891 // scope, create a block to stage a loop exit along.
892 // We probably already do the right thing because of ScopeOp, but make
893 // sure we handle all cases.
895
896 doWhileOp = builder.createDoWhile(
897 getLoc(s.getSourceRange()),
898 /*condBuilder=*/
899 [&](mlir::OpBuilder &b, mlir::Location loc) {
900 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
901 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
902 // C99 6.8.5p2/p4: The first substatement is executed if the
903 // expression compares unequal to 0. The condition must be a
904 // scalar type.
905 mlir::Value condVal = evaluateExprAsBool(s.getCond());
906 builder.createCondition(condVal);
907 },
908 /*bodyBuilder=*/
909 [&](mlir::OpBuilder &b, mlir::Location loc) {
910 // The scope of the do-while loop body is a nested scope.
911 if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
912 loopRes = mlir::failure();
913 emitStopPoint(&s);
914 });
915 return loopRes;
916 };
917
918 mlir::LogicalResult res = mlir::success();
919 mlir::Location scopeLoc = getLoc(s.getSourceRange());
920 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
921 [&](mlir::OpBuilder &b, mlir::Location loc) {
922 LexicalScope lexScope{*this, loc,
923 builder.getInsertionBlock()};
924 res = doStmtBuilder();
925 });
926
927 if (res.failed())
928 return res;
929
930 terminateBody(builder, doWhileOp.getBody(), getLoc(s.getEndLoc()));
931 return mlir::success();
932}
933
934mlir::LogicalResult CIRGenFunction::emitWhileStmt(const WhileStmt &s) {
935 cir::WhileOp whileOp;
936
937 // TODO: pass in array of attributes.
938 auto whileStmtBuilder = [&]() -> mlir::LogicalResult {
939 mlir::LogicalResult loopRes = mlir::success();
941 // From LLVM: if there are any cleanups between here and the loop-exit
942 // scope, create a block to stage a loop exit along.
943 // We probably already do the right thing because of ScopeOp, but make
944 // sure we handle all cases.
946
947 whileOp = builder.createWhile(
948 getLoc(s.getSourceRange()),
949 /*condBuilder=*/
950 [&](mlir::OpBuilder &b, mlir::Location loc) {
951 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
952 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
953 mlir::Value condVal;
954 // If the for statement has a condition scope,
955 // emit the local variable declaration.
956 if (s.getConditionVariable())
957 emitDecl(*s.getConditionVariable());
958 // C99 6.8.5p2/p4: The first substatement is executed if the
959 // expression compares unequal to 0. The condition must be a
960 // scalar type.
961 condVal = evaluateExprAsBool(s.getCond());
962 builder.createCondition(condVal);
963 },
964 /*bodyBuilder=*/
965 [&](mlir::OpBuilder &b, mlir::Location loc) {
966 // The scope of the while loop body is a nested scope.
967 if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
968 loopRes = mlir::failure();
969 emitStopPoint(&s);
970 });
971 return loopRes;
972 };
973
974 mlir::LogicalResult res = mlir::success();
975 mlir::Location scopeLoc = getLoc(s.getSourceRange());
976 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
977 [&](mlir::OpBuilder &b, mlir::Location loc) {
978 LexicalScope lexScope{*this, loc,
979 builder.getInsertionBlock()};
980 res = whileStmtBuilder();
981 });
982
983 if (res.failed())
984 return res;
985
986 terminateBody(builder, whileOp.getBody(), getLoc(s.getEndLoc()));
987 return mlir::success();
988}
989
990mlir::LogicalResult CIRGenFunction::emitSwitchBody(const Stmt *s) {
991 // It is rare but legal if the switch body is not a compound stmt. e.g.,
992 //
993 // switch(a)
994 // while(...) {
995 // case1
996 // ...
997 // case2
998 // ...
999 // }
1000 if (!isa<CompoundStmt>(s))
1001 return emitStmt(s, /*useCurrentScope=*/true);
1002
1004
1005 mlir::Block *swtichBlock = builder.getBlock();
1006 for (auto *c : compoundStmt->body()) {
1007 if (auto *switchCase = dyn_cast<SwitchCase>(c)) {
1008 builder.setInsertionPointToEnd(swtichBlock);
1009 // Reset insert point automatically, so that we can attach following
1010 // random stmt to the region of previous built case op to try to make
1011 // the being generated `cir.switch` to be in simple form.
1012 if (mlir::failed(
1013 emitSwitchCase(*switchCase, /*buildingTopLevelCase=*/true)))
1014 return mlir::failure();
1015
1016 continue;
1017 }
1018
1019 // Otherwise, just build the statements in the nearest case region.
1020 if (mlir::failed(emitStmt(c, /*useCurrentScope=*/!isa<CompoundStmt>(c))))
1021 return mlir::failure();
1022 }
1023
1024 return mlir::success();
1025}
1026
1028 // TODO: LLVM codegen does some early optimization to fold the condition and
1029 // only emit live cases. CIR should use MLIR to achieve similar things,
1030 // nothing to be done here.
1031 // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))...
1033
1034 SwitchOp swop;
1035 auto switchStmtBuilder = [&]() -> mlir::LogicalResult {
1036 if (s.getInit())
1037 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
1038 return mlir::failure();
1039
1040 if (s.getConditionVariable())
1041 emitDecl(*s.getConditionVariable(), /*evaluateConditionDecl=*/true);
1042
1043 mlir::Value condV = emitScalarExpr(s.getCond());
1044
1045 // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts())
1048 // TODO: if the switch has a condition wrapped by __builtin_unpredictable?
1050
1051 mlir::LogicalResult res = mlir::success();
1052 swop = SwitchOp::create(
1053 builder, getLoc(s.getBeginLoc()), condV,
1054 /*switchBuilder=*/
1055 [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) {
1056 curLexScope->setAsSwitch();
1057
1058 condTypeStack.push_back(condV.getType());
1059
1060 res = emitSwitchBody(s.getBody());
1061
1062 condTypeStack.pop_back();
1063 });
1064
1065 return res;
1066 };
1067
1068 // The switch scope contains the full source range for SwitchStmt.
1069 mlir::Location scopeLoc = getLoc(s.getSourceRange());
1070 mlir::LogicalResult res = mlir::success();
1071 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
1072 [&](mlir::OpBuilder &b, mlir::Location loc) {
1073 LexicalScope lexScope{*this, loc,
1074 builder.getInsertionBlock()};
1075 res = switchStmtBuilder();
1076 });
1077
1079 swop.collectCases(cases);
1080 for (auto caseOp : cases)
1081 terminateBody(builder, caseOp.getCaseRegion(), caseOp.getLoc());
1082 terminateBody(builder, swop.getBody(), swop.getLoc());
1083
1084 return res;
1085}
1086
1087void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue rv,
1088 QualType ty) {
1089 if (rv.isScalar()) {
1090 builder.createStore(loc, rv.getValue(), returnValue);
1091 } else if (rv.isAggregate()) {
1092 LValue dest = makeAddrLValue(returnValue, ty);
1095 } else {
1096 cgm.errorNYI(loc, "emitReturnOfRValue: complex return type");
1097 }
1098 mlir::Block *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
1100 cir::BrOp::create(builder, loc, retBlock);
1101 if (ehStack.stable_begin() != currentCleanupStackDepth)
1102 cgm.errorNYI(loc, "return of r-value with cleanup stack");
1103}
static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r, mlir::Location loc)
static mlir::LogicalResult emitStmtWithResult(CIRGenFunction &cgf, const Stmt *exprResult, AggValueSlot slot, Address *lastValue)
Defines the clang::Expr interface and subclasses for C++ expressions.
This file defines OpenACC AST classes for statement-level constructs.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
BreakStmt - This represents a break.
Definition Stmt.h:3135
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
const clang::LangOptions & getLangOpts() const
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
JumpDest returnBlock(mlir::Block *retBlock)
Unified return block.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
Definition CIRGenAsm.cpp:86
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
Address returnValue
The temporary alloca to hold the return value.
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::Value emitScalarExpr(const clang::Expr *e)
Emit the computation of the specified expression of scalar type.
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest)
Build an unconditional branch to the lexical scope cleanup block, or to the labeled block if alread...
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
EHScopeStack::stable_iterator currentCleanupStackDepth
clang::ASTContext & getContext() const
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Address getAggregateAddress() const
Return the value of the address of the aggregate.
Definition CIRGenValue.h:68
bool isAggregate() const
Definition CIRGenValue.h:51
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:56
bool isScalar() const
Definition CIRGenValue.h:49
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
CaseStmt - Represent a case statement.
Definition Stmt.h:1920
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1720
ContinueStmt - This represents a continue.
Definition Stmt.h:3119
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1611
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2832
This represents one expression.
Definition Expr.h:112
QualType getType() const
Definition Expr.h:144
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2888
GotoStmt - This represents a direct goto.
Definition Stmt.h:2969
IfStmt - This represents an if/then/else.
Definition Stmt.h:2259
Represents the declaration of a label.
Definition Decl.h:524
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.h:554
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2146
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A (possibly-)qualified type.
Definition TypeBase.h:937
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3160
Stmt - This represents one statement.
Definition Stmt.h:85
@ NoStmtClass
Definition Stmt.h:88
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2509
bool isVoidType() const
Definition TypeBase.h:8871
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2697
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
const internal::VariadicDynCastAllOfMatcher< Stmt, CompoundStmt > compoundStmt
Matches compound statements.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::VariadicDynCastAllOfMatcher< Stmt, SwitchCase > switchCase
Matches case and default statements inside switch statements.
const internal::VariadicAllOfMatcher< Stmt > stmt
Matches statements.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1746
static bool aggValueSlotGC()
static bool loopInfoStack()
static bool emitCondLikelihoodViaExpectIntrinsic()
static bool constantFoldSwitchStatement()
static bool insertBuiltinUnpredictable()
static bool ehstackBranches()
static bool emitBranchThroughCleanup()
static bool requiresCleanups()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...