clang 23.0.0git
CIRGenStmt.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Stmt nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
14#include "CIRGenFunction.h"
15
16#include "mlir/IR/Builders.h"
17#include "mlir/IR/Location.h"
18#include "mlir/Support/LLVM.h"
19#include "clang/AST/ExprCXX.h"
20#include "clang/AST/Stmt.h"
24
25using namespace clang;
26using namespace clang::CIRGen;
27using namespace cir;
28
29static mlir::LogicalResult emitStmtWithResult(CIRGenFunction &cgf,
30 const Stmt *exprResult,
31 AggValueSlot slot,
32 Address *lastValue) {
33 // We have to special case labels here. They are statements, but when put
34 // at the end of a statement expression, they yield the value of their
35 // subexpression. Handle this by walking through all labels we encounter,
36 // emitting them before we evaluate the subexpr.
37 // Similar issues arise for attributed statements.
38 while (!isa<Expr>(exprResult)) {
39 if (const auto *ls = dyn_cast<LabelStmt>(exprResult)) {
40 if (cgf.emitLabel(*ls->getDecl()).failed())
41 return mlir::failure();
42 exprResult = ls->getSubStmt();
43 } else if (const auto *as = dyn_cast<AttributedStmt>(exprResult)) {
44 // FIXME: Update this if we ever have attributes that affect the
45 // semantics of an expression.
46 exprResult = as->getSubStmt();
47 } else {
48 llvm_unreachable("Unknown value statement");
49 }
50 }
51
52 const Expr *e = cast<Expr>(exprResult);
53 QualType exprTy = e->getType();
54 if (cgf.hasAggregateEvaluationKind(exprTy)) {
55 cgf.emitAggExpr(e, slot);
56 } else {
57 // We can't return an RValue here because there might be cleanups at
58 // the end of the StmtExpr. Because of that, we have to emit the result
59 // here into a temporary alloca.
60 cgf.emitAnyExprToMem(e, *lastValue, Qualifiers(),
61 /*IsInit*/ false);
62 }
63
64 return mlir::success();
65}
66
68 const CompoundStmt &s, Address *lastValue, AggValueSlot slot) {
69 mlir::LogicalResult result = mlir::success();
70 const Stmt *exprResult = s.body_back();
71 assert((!lastValue || (lastValue && exprResult)) &&
72 "If lastValue is not null then the CompoundStmt must have a "
73 "StmtExprResult");
74
75 for (const Stmt *curStmt : s.body()) {
76 const bool saveResult = lastValue && exprResult == curStmt;
77 if (saveResult) {
78 if (emitStmtWithResult(*this, exprResult, slot, lastValue).failed())
79 result = mlir::failure();
80 } else {
81 if (emitStmt(curStmt, /*useCurrentScope=*/false).failed())
82 result = mlir::failure();
83 }
84 }
85 return result;
86}
87
88mlir::LogicalResult
90 for (const Attr *attr : s.getAttrs()) {
91 switch (attr->getKind()) {
92 default:
93 break;
94 case attr::NoMerge:
95 case attr::NoInline:
96 case attr::AlwaysInline:
97 case attr::NoConvergent:
98 case attr::MustTail:
99 case attr::Atomic:
100 case attr::HLSLControlFlowHint:
101 cgm.errorNYI(s.getSourceRange(),
102 "Unimplemented statement attribute: ", attr->getKind());
103 break;
104 case attr::CXXAssume: {
105 const Expr *assumptionExpr = cast<CXXAssumeAttr>(attr)->getAssumption();
106 if (getLangOpts().CXXAssumptions && builder.getInsertionBlock() &&
107 !assumptionExpr->HasSideEffects(getContext())) {
108 mlir::Value assumptionValue = emitCheckedArgForAssume(assumptionExpr);
109 cir::AssumeOp::create(builder, getLoc(s.getSourceRange()),
110 assumptionValue);
111 }
112 } break;
113 }
114 }
115
116 return emitStmt(s.getSubStmt(), /*useCurrentScope=*/true, s.getAttrs());
117}
118
120 Address *lastValue,
121 AggValueSlot slot) {
122 // Add local scope to track new declared variables.
124 mlir::Location scopeLoc = getLoc(s.getSourceRange());
125 mlir::OpBuilder::InsertPoint scopeInsPt;
126 cir::ScopeOp::create(
127 builder, scopeLoc,
128 [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
129 scopeInsPt = b.saveInsertionPoint();
130 });
131 mlir::OpBuilder::InsertionGuard guard(builder);
132 builder.restoreInsertionPoint(scopeInsPt);
133 LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
134 return emitCompoundStmtWithoutScope(s, lastValue, slot);
135}
136
140
141// Build CIR for a statement. useCurrentScope should be true if no new scopes
142// need to be created when finding a compound statement.
// emitStmt: dispatch a statement to the matching CIR emitter. Simple
// statements are tried first via emitSimpleStmt; expressions are emitted for
// side effects; everything not yet supported reports errorNYI.
// NOTE(review): this listing is a doxygen scrape; the hyperlinked source
// lines (the trailing parameter line of the signature and the per-case
// `return emit...`/`cgm.errorNYI(...)` lines, e.g. original lines 145, 150,
// 173, 184, 188, 192-228, 233-415) were dropped by the extraction. The text
// below is kept byte-identical to the scrape — recover the missing lines
// from upstream clang/lib/CIR/CodeGen/CIRGenStmt.cpp before compiling.
143mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
144 bool useCurrentScope,
146 if (mlir::succeeded(emitSimpleStmt(s, useCurrentScope)))
147 return mlir::success();
148
149 switch (s->getStmtClass()) {
151 case Stmt::CXXCatchStmtClass:
152 case Stmt::SEHExceptStmtClass:
153 case Stmt::SEHFinallyStmtClass:
154 case Stmt::MSDependentExistsStmtClass:
155 case Stmt::UnresolvedSYCLKernelCallStmtClass:
156 llvm_unreachable("invalid statement class to emit generically");
157 case Stmt::BreakStmtClass:
158 case Stmt::NullStmtClass:
159 case Stmt::CompoundStmtClass:
160 case Stmt::ContinueStmtClass:
161 case Stmt::DeclStmtClass:
162 case Stmt::ReturnStmtClass:
163 llvm_unreachable("should have emitted these statements as simple");
164
165#define STMT(Type, Base)
166#define ABSTRACT_STMT(Op)
167#define EXPR(Type, Base) case Stmt::Type##Class:
168#include "clang/AST/StmtNodes.inc"
169 {
170 assert(builder.getInsertionBlock() &&
171 "expression emission must have an insertion point");
172
174
175 // Classic codegen has a check here to see if the emitter created a new
176 // block that isn't used (comparing the incoming and outgoing insertion
177 // points) and deletes the outgoing block if it's not used. In CIR, we
178 // will handle that during the cir.canonicalize pass.
179 return mlir::success();
180 }
181 case Stmt::IfStmtClass:
182 return emitIfStmt(cast<IfStmt>(*s));
183 case Stmt::SwitchStmtClass:
185 case Stmt::ForStmtClass:
186 return emitForStmt(cast<ForStmt>(*s));
187 case Stmt::WhileStmtClass:
189 case Stmt::DoStmtClass:
190 return emitDoStmt(cast<DoStmt>(*s));
191 case Stmt::CXXTryStmtClass:
193 case Stmt::CXXForRangeStmtClass:
195 case Stmt::CoroutineBodyStmtClass:
197 case Stmt::IndirectGotoStmtClass:
199 case Stmt::CoreturnStmtClass:
201 case Stmt::OpenACCComputeConstructClass:
203 case Stmt::OpenACCLoopConstructClass:
205 case Stmt::OpenACCCombinedConstructClass:
207 case Stmt::OpenACCDataConstructClass:
209 case Stmt::OpenACCEnterDataConstructClass:
211 case Stmt::OpenACCExitDataConstructClass:
213 case Stmt::OpenACCHostDataConstructClass:
215 case Stmt::OpenACCWaitConstructClass:
217 case Stmt::OpenACCInitConstructClass:
219 case Stmt::OpenACCShutdownConstructClass:
221 case Stmt::OpenACCSetConstructClass:
223 case Stmt::OpenACCUpdateConstructClass:
225 case Stmt::OpenACCCacheConstructClass:
227 case Stmt::OpenACCAtomicConstructClass:
229 case Stmt::GCCAsmStmtClass:
230 case Stmt::MSAsmStmtClass:
231 return emitAsmStmt(cast<AsmStmt>(*s));
// NOTE(review): each OMP case below was followed by dropped errorNYI lines.
232 case Stmt::OMPScopeDirectiveClass:
234 case Stmt::OMPErrorDirectiveClass:
236 case Stmt::OMPParallelDirectiveClass:
238 case Stmt::OMPTaskwaitDirectiveClass:
240 case Stmt::OMPTaskyieldDirectiveClass:
242 case Stmt::OMPBarrierDirectiveClass:
244 case Stmt::OMPMetaDirectiveClass:
246 case Stmt::OMPCanonicalLoopClass:
248 case Stmt::OMPSimdDirectiveClass:
250 case Stmt::OMPTileDirectiveClass:
252 case Stmt::OMPUnrollDirectiveClass:
254 case Stmt::OMPFuseDirectiveClass:
256 case Stmt::OMPForDirectiveClass:
258 case Stmt::OMPForSimdDirectiveClass:
260 case Stmt::OMPSectionsDirectiveClass:
262 case Stmt::OMPSectionDirectiveClass:
264 case Stmt::OMPSingleDirectiveClass:
266 case Stmt::OMPMasterDirectiveClass:
268 case Stmt::OMPCriticalDirectiveClass:
270 case Stmt::OMPParallelForDirectiveClass:
272 case Stmt::OMPParallelForSimdDirectiveClass:
275 case Stmt::OMPParallelMasterDirectiveClass:
277 case Stmt::OMPParallelSectionsDirectiveClass:
280 case Stmt::OMPTaskDirectiveClass:
282 case Stmt::OMPTaskgroupDirectiveClass:
284 case Stmt::OMPFlushDirectiveClass:
286 case Stmt::OMPDepobjDirectiveClass:
288 case Stmt::OMPScanDirectiveClass:
290 case Stmt::OMPOrderedDirectiveClass:
292 case Stmt::OMPAtomicDirectiveClass:
294 case Stmt::OMPTargetDirectiveClass:
296 case Stmt::OMPTeamsDirectiveClass:
298 case Stmt::OMPCancellationPointDirectiveClass:
301 case Stmt::OMPCancelDirectiveClass:
303 case Stmt::OMPTargetDataDirectiveClass:
305 case Stmt::OMPTargetEnterDataDirectiveClass:
308 case Stmt::OMPTargetExitDataDirectiveClass:
310 case Stmt::OMPTargetParallelDirectiveClass:
312 case Stmt::OMPTargetParallelForDirectiveClass:
315 case Stmt::OMPTaskLoopDirectiveClass:
317 case Stmt::OMPTaskLoopSimdDirectiveClass:
319 case Stmt::OMPMaskedTaskLoopDirectiveClass:
321 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
324 case Stmt::OMPMasterTaskLoopDirectiveClass:
326 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
329 case Stmt::OMPParallelGenericLoopDirectiveClass:
332 case Stmt::OMPParallelMaskedDirectiveClass:
334 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
337 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
340 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
343 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
346 case Stmt::OMPDistributeDirectiveClass:
348 case Stmt::OMPDistributeParallelForDirectiveClass:
351 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
354 case Stmt::OMPDistributeSimdDirectiveClass:
356 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
359 case Stmt::OMPTargetParallelForSimdDirectiveClass:
362 case Stmt::OMPTargetSimdDirectiveClass:
364 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
367 case Stmt::OMPTargetUpdateDirectiveClass:
369 case Stmt::OMPTeamsDistributeDirectiveClass:
372 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
375 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
378 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
381 case Stmt::OMPTeamsGenericLoopDirectiveClass:
384 case Stmt::OMPTargetTeamsDirectiveClass:
386 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
389 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
392 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
395 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
398 case Stmt::OMPInteropDirectiveClass:
400 case Stmt::OMPDispatchDirectiveClass:
402 case Stmt::OMPGenericLoopDirectiveClass:
404 case Stmt::OMPReverseDirectiveClass:
406 case Stmt::OMPSplitDirectiveClass:
408 case Stmt::OMPInterchangeDirectiveClass:
410 case Stmt::OMPAssumeDirectiveClass:
412 case Stmt::OMPMaskedDirectiveClass:
414 case Stmt::OMPStripeDirectiveClass:
416 case Stmt::LabelStmtClass:
417 case Stmt::AttributedStmtClass:
418 case Stmt::GotoStmtClass:
419 case Stmt::DefaultStmtClass:
420 case Stmt::CaseStmtClass:
421 case Stmt::SEHLeaveStmtClass:
422 case Stmt::SYCLKernelCallStmtClass:
423 case Stmt::CapturedStmtClass:
424 case Stmt::ObjCAtTryStmtClass:
425 case Stmt::ObjCAtThrowStmtClass:
426 case Stmt::ObjCAtSynchronizedStmtClass:
427 case Stmt::ObjCForCollectionStmtClass:
428 case Stmt::ObjCAutoreleasePoolStmtClass:
429 case Stmt::SEHTryStmtClass:
430 case Stmt::ObjCAtCatchStmtClass:
431 case Stmt::ObjCAtFinallyStmtClass:
432 case Stmt::DeferStmtClass:
433 cgm.errorNYI(s->getSourceRange(),
434 std::string("emitStmt: ") + s->getStmtClassName());
435 return mlir::failure();
436 }
437
438 llvm_unreachable("Unexpected statement class");
439}
440
// emitSimpleStmt: handle the statements that need no generic machinery
// (declarations, compound statements, jumps, labels, case/default, null).
// Returns failure for statement classes it does not handle so emitStmt can
// dispatch them instead.
// NOTE(review): this is a doxygen scrape; the dropped lines (original 450-451,
// 455, 462, 466, 471, 473, 475) carried the emitCompoundStmt*/emitContinueStmt/
// emitLabelStmt/emitSwitchCase/emitBreakStmt/emitReturnStmt/emitAttributedStmt
// calls — recover them from upstream before compiling. Text below is kept
// byte-identical to the scrape.
441mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *s,
442 bool useCurrentScope) {
443 switch (s->getStmtClass()) {
444 default:
445 return mlir::failure();
446 case Stmt::DeclStmtClass:
447 return emitDeclStmt(cast<DeclStmt>(*s));
448 case Stmt::CompoundStmtClass:
449 if (useCurrentScope)
452 case Stmt::GotoStmtClass:
453 return emitGotoStmt(cast<GotoStmt>(*s));
454 case Stmt::ContinueStmtClass:
456
457 // NullStmt doesn't need any handling, but we need to say we handled it.
458 case Stmt::NullStmtClass:
459 break;
460
461 case Stmt::LabelStmtClass:
463 case Stmt::CaseStmtClass:
464 case Stmt::DefaultStmtClass:
465 // If we reached here, we must not handling a switch case in the top level.
467 /*buildingTopLevelCase=*/false);
468 break;
469
470 case Stmt::BreakStmtClass:
472 case Stmt::ReturnStmtClass:
474 case Stmt::AttributedStmtClass:
476 }
477
478 return mlir::success();
479}
480
482
483 if (emitLabel(*s.getDecl()).failed())
484 return mlir::failure();
485
486 if (getContext().getLangOpts().EHAsynch && s.isSideEntry())
487 getCIRGenModule().errorNYI(s.getSourceRange(), "IsEHa: not implemented.");
488
489 return emitStmt(s.getSubStmt(), /*useCurrentScope*/ true);
490}
491
492// Add a terminating yield on a body region if no other terminators are used.
494 mlir::Location loc) {
495 if (r.empty())
496 return;
497
499 unsigned numBlocks = r.getBlocks().size();
500 for (auto &block : r.getBlocks()) {
501 // Already cleanup after return operations, which might create
502 // empty blocks if emitted as last stmt.
503 if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() &&
504 block.hasNoSuccessors())
505 eraseBlocks.push_back(&block);
506
507 if (block.empty() ||
508 !block.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
509 mlir::OpBuilder::InsertionGuard guardCase(builder);
510 builder.setInsertionPointToEnd(&block);
511 builder.createYield(loc);
512 }
513 }
514
515 for (auto *b : eraseBlocks)
516 b->erase();
517}
518
// emitIfStmt: emit an if statement inside a cir.scope. Handles `if consteval`
// (only the runtime branch is emitted), constant-folded `if constexpr`
// (emitted without a cir.if), and the general case via emitIfOnBoolExpr.
// NOTE(review): doxygen scrape — original lines 562-563 (between the
// constant-fold block and the emitIfOnBoolExpr call) were dropped; recover
// them from upstream before compiling. Text below is byte-identical to the
// scrape.
519mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &s) {
520 mlir::LogicalResult res = mlir::success();
521 // The else branch of a consteval if statement is always the only branch
522 // that can be runtime evaluated.
523 const Stmt *constevalExecuted;
524 if (s.isConsteval()) {
525 constevalExecuted = s.isNegatedConsteval() ? s.getThen() : s.getElse();
526 if (!constevalExecuted) {
527 // No runtime code execution required
528 return res;
529 }
530 }
531
532 // C99 6.8.4.1: The first substatement is executed if the expression
533 // compares unequal to 0. The condition must be a scalar type.
534 auto ifStmtBuilder = [&]() -> mlir::LogicalResult {
535 if (s.isConsteval())
536 return emitStmt(constevalExecuted, /*useCurrentScope=*/true);
537
538 if (s.getInit())
539 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
540 return mlir::failure();
541
542 if (s.getConditionVariable())
543 emitDecl(*s.getConditionVariable());
544
545 // If the condition folds to a constant and this is an 'if constexpr',
546 // we simplify it early in CIRGen to avoid emitting the full 'if'.
547 bool condConstant;
548 if (constantFoldsToBool(s.getCond(), condConstant, s.isConstexpr())) {
549 if (s.isConstexpr()) {
550 // Handle "if constexpr" explicitly here to avoid generating some
551 // ill-formed code since in CIR the "if" is no longer simplified
552 // in this lambda like in Clang but postponed to other MLIR
553 // passes.
554 if (const Stmt *executed = condConstant ? s.getThen() : s.getElse())
555 return emitStmt(executed, /*useCurrentScope=*/true);
556 // There is nothing to execute at runtime.
557 // TODO(cir): there is still an empty cir.scope generated by the caller.
558 return mlir::success();
559 }
560 }
561
564 return emitIfOnBoolExpr(s.getCond(), s.getThen(), s.getElse());
565 };
566
567 // TODO: Add a new scoped symbol table.
568 // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
569 // The if scope contains the full source range for IfStmt.
570 mlir::Location scopeLoc = getLoc(s.getSourceRange());
571 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
572 [&](mlir::OpBuilder &b, mlir::Location loc) {
573 LexicalScope lexScope{*this, scopeLoc,
574 builder.getInsertionBlock()};
575 res = ifStmtBuilder();
576 });
577
578 return res;
579}
580
581mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &s) {
582 assert(builder.getInsertionBlock() && "expected valid insertion point");
583
584 for (const Decl *i : s.decls())
585 emitDecl(*i, /*evaluateConditionDecl=*/true);
586
587 return mlir::success();
588}
589
// emitReturnStmt: evaluate the return value (honoring NRVO, void returns,
// reference returns, and scalar/complex/aggregate evaluation kinds), store it
// into the `__retval` alloca, run cleanups, then emit cir.return and open a
// fresh block for any unreachable trailing code.
// NOTE(review): doxygen scrape — dropped lines include original 604, 626 (the
// reference-return RValue evaluation), 631 (the switch header on the
// evaluation kind), 639-640 and 643-648 (the complex/aggregate case bodies).
// Recover them from upstream before compiling. Text below is byte-identical
// to the scrape.
590mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
591 mlir::Location loc = getLoc(s.getSourceRange());
592 const Expr *rv = s.getRetValue();
593
594 RunCleanupsScope cleanupScope(*this);
595 bool createNewScope = false;
596 if (const auto *ewc = dyn_cast_or_null<ExprWithCleanups>(rv)) {
597 rv = ewc->getSubExpr();
598 createNewScope = true;
599 }
600
601 auto handleReturnVal = [&]() {
602 if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
603 s.getNRVOCandidate()->isNRVOVariable()) {
605 // Apply the named return value optimization for this return statement,
606 // which means doing nothing: the appropriate result has already been
607 // constructed into the NRVO variable.
608
609 // If there is an NRVO flag for this variable, set it to 1 into indicate
610 // that the cleanup code should not destroy the variable.
611 if (auto nrvoFlag = nrvoFlags[s.getNRVOCandidate()])
612 builder.createFlagStore(loc, true, nrvoFlag);
613 } else if (!rv) {
614 // No return expression. Do nothing.
615 } else if (rv->getType()->isVoidType()) {
616 // Make sure not to return anything, but evaluate the expression
617 // for side effects.
618 if (rv) {
619 emitAnyExpr(rv);
620 }
621 } else if (cast<FunctionDecl>(curGD.getDecl())
622 ->getReturnType()
623 ->isReferenceType()) {
624 // If this function returns a reference, take the address of the
625 // expression rather than the value.
627 builder.CIRBaseBuilderTy::createStore(loc, result.getValue(),
628 *fnRetAlloca);
629 } else {
630 mlir::Value value = nullptr;
632 case cir::TEK_Scalar:
633 value = emitScalarExpr(rv);
634 if (value) { // Change this to an assert once emitScalarExpr is complete
635 builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
636 }
637 break;
638 case cir::TEK_Complex:
641 /*isInit=*/true);
642 break;
649 break;
650 }
651 }
652 };
653
654 if (!createNewScope) {
655 handleReturnVal();
656 } else {
657 FullExprCleanupScope fullExprScope(*this, rv);
658 handleReturnVal();
659 }
660
661 cleanupScope.forceCleanup();
662
663 // Classic codegen emits a branch through any cleanups before continuing to
664 // a shared return block. Because CIR handles branching through cleanups
665 // during the CFG flattening phase, we can just emit the return statement
666 // directly.
667 // TODO(cir): Eliminate this redundant load and the store above when we can.
668 if (fnRetAlloca) {
669 // Load the value from `__retval` and return it via the `cir.return` op.
670 cir::AllocaOp retAlloca =
671 mlir::cast<cir::AllocaOp>(fnRetAlloca->getDefiningOp());
672 auto value = cir::LoadOp::create(builder, loc, retAlloca.getAllocaType(),
673 *fnRetAlloca);
674
675 cir::ReturnOp::create(builder, loc, {value});
676 } else {
677 cir::ReturnOp::create(builder, loc);
678 }
679
680 // Insert the new block to continue codegen after the return statement.
681 // This will get deleted if we don't populate it. This handles the case of
682 // unreachable statements below a return.
683 builder.createBlock(builder.getBlock()->getParent());
684 return mlir::success();
685}
686
// emitGotoStmt: emit a cir.goto targeting the label by name, then open a new
// block so codegen can continue after the (block-terminating) goto.
// NOTE(review): doxygen scrape — original line 692 (directly after the FIXME
// comment) was dropped; recover it from upstream. Text below is byte-identical
// to the scrape.
687mlir::LogicalResult CIRGenFunction::emitGotoStmt(const clang::GotoStmt &s) {
688 // FIXME: LLVM codegen inserts emit a stop point here for debug info
689 // sake when the insertion point is available, but doesn't do
690 // anything special when there isn't. We haven't implemented debug
691 // info support just yet, look at this again once we have it.
693
694 cir::GotoOp::create(builder, getLoc(s.getSourceRange()),
695 s.getLabel()->getName());
696
697 // A goto marks the end of a block, create a new one for codegen after
698 // emitGotoStmt can resume building in that block.
699 // Insert the new block to continue codegen after goto.
700 builder.createBlock(builder.getBlock()->getParent());
701
702 return mlir::success();
703}
704
705mlir::LogicalResult
707 mlir::Value val = emitScalarExpr(s.getTarget());
708 assert(indirectGotoBlock &&
709 "If you jumping to a indirect branch should be alareadye emitted");
710 cir::BrOp::create(builder, getLoc(s.getSourceRange()), indirectGotoBlock,
711 val);
712 builder.createBlock(builder.getBlock()->getParent());
713 return mlir::success();
714}
715
716mlir::LogicalResult
718 builder.createContinue(getLoc(s.getKwLoc()));
719
720 // Insert the new block to continue codegen after the continue statement.
721 builder.createBlock(builder.getBlock()->getParent());
722
723 return mlir::success();
724}
725
// emitLabel: attach a cir.label to the current block, creating (and branching
// to) a fresh block first when the current one is non-empty or is the entry
// block. Also registers the label for block-address (&&label) lookups.
// NOTE(review): doxygen scrape — original lines 751-752 (after the FIXME
// comment) were dropped; recover them from upstream. Text below is
// byte-identical to the scrape.
726mlir::LogicalResult CIRGenFunction::emitLabel(const clang::LabelDecl &d) {
727 // Create a new block to tag with a label and add a branch from
728 // the current one to it. If the block is empty just call attach it
729 // to this label.
730 mlir::Block *currBlock = builder.getBlock();
731 mlir::Block *labelBlock = currBlock;
732
733 if (!currBlock->empty() || currBlock->isEntryBlock()) {
734 {
735 mlir::OpBuilder::InsertionGuard guard(builder);
736 labelBlock = builder.createBlock(builder.getBlock()->getParent());
737 }
738 cir::BrOp::create(builder, getLoc(d.getSourceRange()), labelBlock);
739 }
740
741 builder.setInsertionPointToEnd(labelBlock);
742 cir::LabelOp label =
743 cir::LabelOp::create(builder, getLoc(d.getSourceRange()), d.getName());
744 builder.setInsertionPointToEnd(labelBlock);
745 auto func = cast<cir::FuncOp>(curFn);
746 cgm.mapBlockAddress(cir::BlockAddrInfoAttr::get(builder.getContext(),
747 func.getSymNameAttr(),
748 label.getLabelAttr()),
749 label);
750 // FIXME: emit debug info for labels, incrementProfileCounter
753 return mlir::success();
754}
755
757 builder.createBreak(getLoc(s.getKwLoc()));
758
759 // Insert the new block to continue codegen after the break statement.
760 builder.createBlock(builder.getBlock()->getParent());
761
762 return mlir::success();
763}
764
765template <typename T>
766mlir::LogicalResult
767CIRGenFunction::emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
768 mlir::ArrayAttr value, CaseOpKind kind,
769 bool buildingTopLevelCase) {
770
772 "only case or default stmt go here");
773
774 mlir::LogicalResult result = mlir::success();
775
776 mlir::Location loc = getLoc(stmt->getBeginLoc());
777
778 enum class SubStmtKind { Case, Default, Other };
779 SubStmtKind subStmtKind = SubStmtKind::Other;
780 const Stmt *sub = stmt->getSubStmt();
781
782 mlir::OpBuilder::InsertPoint insertPoint;
783 CaseOp::create(builder, loc, value, kind, insertPoint);
784
785 {
786 mlir::OpBuilder::InsertionGuard guardSwitch(builder);
787 builder.restoreInsertionPoint(insertPoint);
788
789 if (isa<DefaultStmt>(sub) && isa<CaseStmt>(stmt)) {
790 subStmtKind = SubStmtKind::Default;
791 builder.createYield(loc);
792 } else if (isa<CaseStmt>(sub) && isa<DefaultStmt, CaseStmt>(stmt)) {
793 subStmtKind = SubStmtKind::Case;
794 builder.createYield(loc);
795 } else {
796 result = emitStmt(sub, /*useCurrentScope=*/!isa<CompoundStmt>(sub));
797 }
798
799 insertPoint = builder.saveInsertionPoint();
800 }
801
802 // If the substmt is default stmt or case stmt, try to handle the special case
803 // to make it into the simple form. e.g.
804 //
805 // swtich () {
806 // case 1:
807 // default:
808 // ...
809 // }
810 //
811 // we prefer generating
812 //
813 // cir.switch() {
814 // cir.case(equal, 1) {
815 // cir.yield
816 // }
817 // cir.case(default) {
818 // ...
819 // }
820 // }
821 //
822 // than
823 //
824 // cir.switch() {
825 // cir.case(equal, 1) {
826 // cir.case(default) {
827 // ...
828 // }
829 // }
830 // }
831 //
832 // We don't need to revert this if we find the current switch can't be in
833 // simple form later since the conversion itself should be harmless.
834 if (subStmtKind == SubStmtKind::Case) {
835 result = emitCaseStmt(*cast<CaseStmt>(sub), condType, buildingTopLevelCase);
836 } else if (subStmtKind == SubStmtKind::Default) {
837 result = emitDefaultStmt(*cast<DefaultStmt>(sub), condType,
838 buildingTopLevelCase);
839 } else if (buildingTopLevelCase) {
840 // If we're building a top level case, try to restore the insert point to
841 // the case we're building, then we can attach more random stmts to the
842 // case to make generating `cir.switch` operation to be a simple form.
843 builder.restoreInsertionPoint(insertPoint);
844 }
845
846 return result;
847}
848
849mlir::LogicalResult CIRGenFunction::emitCaseStmt(const CaseStmt &s,
850 mlir::Type condType,
851 bool buildingTopLevelCase) {
852 cir::CaseOpKind kind;
853 mlir::ArrayAttr value;
854 llvm::APSInt intVal = s.getLHS()->EvaluateKnownConstInt(getContext());
855
856 // If the case statement has an RHS value, it is representing a GNU
857 // case range statement, where LHS is the beginning of the range
858 // and RHS is the end of the range.
859 if (const Expr *rhs = s.getRHS()) {
860 llvm::APSInt endVal = rhs->EvaluateKnownConstInt(getContext());
861 value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal),
862 cir::IntAttr::get(condType, endVal)});
863 kind = cir::CaseOpKind::Range;
864 } else {
865 value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal)});
866 kind = cir::CaseOpKind::Equal;
867 }
868
869 return emitCaseDefaultCascade(&s, condType, value, kind,
870 buildingTopLevelCase);
871}
872
874 mlir::Type condType,
875 bool buildingTopLevelCase) {
876 return emitCaseDefaultCascade(&s, condType, builder.getArrayAttr({}),
877 cir::CaseOpKind::Default, buildingTopLevelCase);
878}
879
880mlir::LogicalResult CIRGenFunction::emitSwitchCase(const SwitchCase &s,
881 bool buildingTopLevelCase) {
882 assert(!condTypeStack.empty() &&
883 "build switch case without specifying the type of the condition");
884
885 if (s.getStmtClass() == Stmt::CaseStmtClass)
887 buildingTopLevelCase);
888
889 if (s.getStmtClass() == Stmt::DefaultStmtClass)
891 buildingTopLevelCase);
892
893 llvm_unreachable("expect case or default stmt");
894}
895
896mlir::LogicalResult
898 ArrayRef<const Attr *> forAttrs) {
899 cir::ForOp forOp;
900
901 // TODO(cir): pass in array of attributes.
902 auto forStmtBuilder = [&]() -> mlir::LogicalResult {
903 mlir::LogicalResult loopRes = mlir::success();
904 // Evaluate the first pieces before the loop.
905 if (s.getInit())
906 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
907 return mlir::failure();
908 if (emitStmt(s.getRangeStmt(), /*useCurrentScope=*/true).failed())
909 return mlir::failure();
910 if (emitStmt(s.getBeginStmt(), /*useCurrentScope=*/true).failed())
911 return mlir::failure();
912 if (emitStmt(s.getEndStmt(), /*useCurrentScope=*/true).failed())
913 return mlir::failure();
914
916 // From LLVM: if there are any cleanups between here and the loop-exit
917 // scope, create a block to stage a loop exit along.
918 // We probably already do the right thing because of ScopeOp, but make
919 // sure we handle all cases.
921
922 forOp = builder.createFor(
923 getLoc(s.getSourceRange()),
924 /*condBuilder=*/
925 [&](mlir::OpBuilder &b, mlir::Location loc) {
926 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
927 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
928 mlir::Value condVal = evaluateExprAsBool(s.getCond());
929 builder.createCondition(condVal);
930 },
931 /*bodyBuilder=*/
932 [&](mlir::OpBuilder &b, mlir::Location loc) {
933 // https://en.cppreference.com/w/cpp/language/for
934 // In C++ the scope of the init-statement and the scope of
935 // statement are one and the same.
936 bool useCurrentScope = true;
937 if (emitStmt(s.getLoopVarStmt(), useCurrentScope).failed())
938 loopRes = mlir::failure();
939 if (emitStmt(s.getBody(), useCurrentScope).failed())
940 loopRes = mlir::failure();
941 emitStopPoint(&s);
942 },
943 /*stepBuilder=*/
944 [&](mlir::OpBuilder &b, mlir::Location loc) {
945 if (s.getInc())
946 if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
947 loopRes = mlir::failure();
948 builder.createYield(loc);
949 });
950 return loopRes;
951 };
952
953 mlir::LogicalResult res = mlir::success();
954 mlir::Location scopeLoc = getLoc(s.getSourceRange());
955 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
956 [&](mlir::OpBuilder &b, mlir::Location loc) {
957 // Create a cleanup scope for the condition
958 // variable cleanups. Logical equivalent from
959 // LLVM codegn for LexicalScope
960 // ConditionScope(*this, S.getSourceRange())...
961 LexicalScope lexScope{*this, loc,
962 builder.getInsertionBlock()};
963 res = forStmtBuilder();
964 });
965
966 if (res.failed())
967 return res;
968
969 terminateStructuredRegionBody(forOp.getBody(), getLoc(s.getEndLoc()));
970 return mlir::success();
971}
972
// emitForStmt: emit a C/C++ for loop as cir.for inside a cir.scope. The init
// statement runs before the loop; a missing condition becomes a constant
// true; the body gets its own nested scope; the increment becomes the step
// region. The body region is terminated afterwards via
// terminateStructuredRegionBody.
// NOTE(review): doxygen scrape — original lines 983 and 988 were dropped;
// recover them from upstream. Text below is byte-identical to the scrape.
973mlir::LogicalResult CIRGenFunction::emitForStmt(const ForStmt &s) {
974 cir::ForOp forOp;
975
976 // TODO: pass in an array of attributes.
977 auto forStmtBuilder = [&]() -> mlir::LogicalResult {
978 mlir::LogicalResult loopRes = mlir::success();
979 // Evaluate the first part before the loop.
980 if (s.getInit())
981 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
982 return mlir::failure();
984 // In the classic codegen, if there are any cleanups between here and the
985 // loop-exit scope, a block is created to stage the loop exit. We probably
986 // already do the right thing because of ScopeOp, but we need more testing
987 // to be sure we handle all cases.
989
990 forOp = builder.createFor(
991 getLoc(s.getSourceRange()),
992 /*condBuilder=*/
993 [&](mlir::OpBuilder &b, mlir::Location loc) {
994 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
995 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
996 mlir::Value condVal;
997 if (s.getCond()) {
998 // If the for statement has a condition scope,
999 // emit the local variable declaration.
1000 if (s.getConditionVariable())
1001 emitDecl(*s.getConditionVariable());
1002 // C99 6.8.5p2/p4: The first substatement is executed if the
1003 // expression compares unequal to 0. The condition must be a
1004 // scalar type.
1005 condVal = evaluateExprAsBool(s.getCond());
1006 } else {
1007 condVal = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
1008 }
1009 builder.createCondition(condVal);
1010 },
1011 /*bodyBuilder=*/
1012 [&](mlir::OpBuilder &b, mlir::Location loc) {
1013 // The scope of the for loop body is nested within the scope of the
1014 // for loop's init-statement and condition.
1015 if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
1016 loopRes = mlir::failure();
1017 emitStopPoint(&s);
1018 },
1019 /*stepBuilder=*/
1020 [&](mlir::OpBuilder &b, mlir::Location loc) {
1021 if (s.getInc())
1022 if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
1023 loopRes = mlir::failure();
1024 builder.createYield(loc);
1025 });
1026 return loopRes;
1027 };
1028
1029 auto res = mlir::success();
1030 auto scopeLoc = getLoc(s.getSourceRange());
1031 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
1032 [&](mlir::OpBuilder &b, mlir::Location loc) {
1033 LexicalScope lexScope{*this, loc,
1034 builder.getInsertionBlock()};
1035 res = forStmtBuilder();
1036 });
1037
1038 if (res.failed())
1039 return res;
1040
1041 terminateStructuredRegionBody(forOp.getBody(), getLoc(s.getEndLoc()));
1042 return mlir::success();
1043}
1044
// emitDoStmt: emit a do-while loop as cir.do_while inside a cir.scope. The
// body (a nested scope) is emitted first; the condition region evaluates the
// loop condition as a bool. The body region is terminated afterwards via
// terminateStructuredRegionBody.
// NOTE(review): doxygen scrape — original lines 1051 and 1056 were dropped;
// recover them from upstream. Text below is byte-identical to the scrape.
1045mlir::LogicalResult CIRGenFunction::emitDoStmt(const DoStmt &s) {
1046 cir::DoWhileOp doWhileOp;
1047
1048 // TODO: pass in array of attributes.
1049 auto doStmtBuilder = [&]() -> mlir::LogicalResult {
1050 mlir::LogicalResult loopRes = mlir::success();
1052 // From LLVM: if there are any cleanups between here and the loop-exit
1053 // scope, create a block to stage a loop exit along.
1054 // We probably already do the right thing because of ScopeOp, but make
1055 // sure we handle all cases.
1057
1058 doWhileOp = builder.createDoWhile(
1059 getLoc(s.getSourceRange()),
1060 /*condBuilder=*/
1061 [&](mlir::OpBuilder &b, mlir::Location loc) {
1062 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
1063 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
1064 // C99 6.8.5p2/p4: The first substatement is executed if the
1065 // expression compares unequal to 0. The condition must be a
1066 // scalar type.
1067 mlir::Value condVal = evaluateExprAsBool(s.getCond());
1068 builder.createCondition(condVal);
1069 },
1070 /*bodyBuilder=*/
1071 [&](mlir::OpBuilder &b, mlir::Location loc) {
1072 // The scope of the do-while loop body is a nested scope.
1073 if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
1074 loopRes = mlir::failure();
1075 emitStopPoint(&s);
1076 });
1077 return loopRes;
1078 };
1079
1080 mlir::LogicalResult res = mlir::success();
1081 mlir::Location scopeLoc = getLoc(s.getSourceRange());
1082 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
1083 [&](mlir::OpBuilder &b, mlir::Location loc) {
1084 LexicalScope lexScope{*this, loc,
1085 builder.getInsertionBlock()};
1086 res = doStmtBuilder();
1087 });
1088
1089 if (res.failed())
1090 return res;
1091
1092 terminateStructuredRegionBody(doWhileOp.getBody(), getLoc(s.getEndLoc()));
1093 return mlir::success();
1094}
1095
1096mlir::LogicalResult CIRGenFunction::emitWhileStmt(const WhileStmt &s) {
1097 cir::WhileOp whileOp;
1098
1099 // TODO: pass in array of attributes.
1100 auto whileStmtBuilder = [&]() -> mlir::LogicalResult {
1101 mlir::LogicalResult loopRes = mlir::success();
1103 // From LLVM: if there are any cleanups between here and the loop-exit
1104 // scope, create a block to stage a loop exit along.
1105 // We probably already do the right thing because of ScopeOp, but make
1106 // sure we handle all cases.
1108
1109 whileOp = builder.createWhile(
1110 getLoc(s.getSourceRange()),
1111 /*condBuilder=*/
1112 [&](mlir::OpBuilder &b, mlir::Location loc) {
1113 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
1114 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
1115 mlir::Value condVal;
1116 // If the for statement has a condition scope,
1117 // emit the local variable declaration.
1118 if (s.getConditionVariable())
1119 emitDecl(*s.getConditionVariable());
1120 // C99 6.8.5p2/p4: The first substatement is executed if the
1121 // expression compares unequal to 0. The condition must be a
1122 // scalar type.
1123 condVal = evaluateExprAsBool(s.getCond());
1124 builder.createCondition(condVal);
1125 },
1126 /*bodyBuilder=*/
1127 [&](mlir::OpBuilder &b, mlir::Location loc) {
1128 // The scope of the while loop body is a nested scope.
1129 if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
1130 loopRes = mlir::failure();
1131 emitStopPoint(&s);
1132 });
1133 return loopRes;
1134 };
1135
1136 mlir::LogicalResult res = mlir::success();
1137 mlir::Location scopeLoc = getLoc(s.getSourceRange());
1138 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
1139 [&](mlir::OpBuilder &b, mlir::Location loc) {
1140 LexicalScope lexScope{*this, loc,
1141 builder.getInsertionBlock()};
1142 res = whileStmtBuilder();
1143 });
1144
1145 if (res.failed())
1146 return res;
1147
1148 terminateStructuredRegionBody(whileOp.getBody(), getLoc(s.getEndLoc()));
1149 return mlir::success();
1150}
1151
1152mlir::LogicalResult CIRGenFunction::emitSwitchBody(const Stmt *s) {
1153 // It is rare but legal if the switch body is not a compound stmt. e.g.,
1154 //
1155 // switch(a)
1156 // while(...) {
1157 // case1
1158 // ...
1159 // case2
1160 // ...
1161 // }
1162 if (!isa<CompoundStmt>(s))
1163 return emitStmt(s, /*useCurrentScope=*/true);
1164
1166
1167 mlir::Block *swtichBlock = builder.getBlock();
1168 for (auto *c : compoundStmt->body()) {
1169 if (auto *switchCase = dyn_cast<SwitchCase>(c)) {
1170 builder.setInsertionPointToEnd(swtichBlock);
1171 // Reset insert point automatically, so that we can attach following
1172 // random stmt to the region of previous built case op to try to make
1173 // the being generated `cir.switch` to be in simple form.
1174 if (mlir::failed(
1175 emitSwitchCase(*switchCase, /*buildingTopLevelCase=*/true)))
1176 return mlir::failure();
1177
1178 continue;
1179 }
1180
1181 // Otherwise, just build the statements in the nearest case region.
1182 if (mlir::failed(emitStmt(c, /*useCurrentScope=*/!isa<CompoundStmt>(c))))
1183 return mlir::failure();
1184 }
1185
1186 return mlir::success();
1187}
1188
1190 // TODO: LLVM codegen does some early optimization to fold the condition and
1191 // only emit live cases. CIR should use MLIR to achieve similar things,
1192 // nothing to be done here.
1193 // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))...
1195
1196 SwitchOp swop;
1197 auto switchStmtBuilder = [&]() -> mlir::LogicalResult {
1198 if (s.getInit())
1199 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
1200 return mlir::failure();
1201
1202 if (s.getConditionVariable())
1203 emitDecl(*s.getConditionVariable(), /*evaluateConditionDecl=*/true);
1204
1205 mlir::Value condV = emitScalarExpr(s.getCond());
1206
1207 // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts())
1210 // TODO: if the switch has a condition wrapped by __builtin_unpredictable?
1212
1213 mlir::LogicalResult res = mlir::success();
1214 swop = SwitchOp::create(
1215 builder, getLoc(s.getBeginLoc()), condV,
1216 /*switchBuilder=*/
1217 [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) {
1218 curLexScope->setAsSwitch();
1219
1220 condTypeStack.push_back(condV.getType());
1221
1222 res = emitSwitchBody(s.getBody());
1223
1224 condTypeStack.pop_back();
1225 });
1226
1227 return res;
1228 };
1229
1230 // The switch scope contains the full source range for SwitchStmt.
1231 mlir::Location scopeLoc = getLoc(s.getSourceRange());
1232 mlir::LogicalResult res = mlir::success();
1233 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
1234 [&](mlir::OpBuilder &b, mlir::Location loc) {
1235 LexicalScope lexScope{*this, loc,
1236 builder.getInsertionBlock()};
1237 res = switchStmtBuilder();
1238 });
1239
1241 swop.collectCases(cases);
1242 for (auto caseOp : cases)
1243 terminateStructuredRegionBody(caseOp.getCaseRegion(), caseOp.getLoc());
1244 terminateStructuredRegionBody(swop.getBody(), swop.getLoc());
1245
1246 swop.setAllEnumCasesCovered(s.isAllEnumCasesCovered());
1247
1248 return res;
1249}
1250
1251void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue rv,
1252 QualType ty) {
1253 if (rv.isScalar()) {
1254 builder.createStore(loc, rv.getValue(), returnValue);
1255 } else if (rv.isAggregate()) {
1256 LValue dest = makeAddrLValue(returnValue, ty);
1259 } else {
1260 cgm.errorNYI(loc, "emitReturnOfRValue: complex return type");
1261 }
1262
1263 // Classic codegen emits a branch through any cleanups before continuing to
1264 // a shared return block. Because CIR handles branching through cleanups
1265 // during the CFG flattening phase, we can just emit the return statement
1266 // directly.
1267 // TODO(cir): Eliminate this redundant load and the store above when we can.
1268 // Load the value from `__retval` and return it via the `cir.return` op.
1269 cir::AllocaOp retAlloca =
1270 mlir::cast<cir::AllocaOp>(fnRetAlloca->getDefiningOp());
1271 auto value = cir::LoadOp::create(builder, loc, retAlloca.getAllocaType(),
1272 *fnRetAlloca);
1273
1274 cir::ReturnOp::create(builder, loc, {value});
1275}
static mlir::LogicalResult emitStmtWithResult(CIRGenFunction &cgf, const Stmt *exprResult, AggValueSlot slot, Address *lastValue)
Defines the clang::Expr interface and subclasses for C++ expressions.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
This file defines OpenACC AST classes for statement-level contructs.
This file defines OpenMP AST classes for executable directives and clauses.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
Attr - This represents one attribute.
Definition Attr.h:46
Represents an attribute applied to a statement.
Definition Stmt.h:2209
BreakStmt - This represents a break.
Definition Stmt.h:3141
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
mlir::LogicalResult emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
mlir::LogicalResult emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
mlir::LogicalResult emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &s)
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s)
const clang::LangOptions & getLangOpts() const
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
mlir::LogicalResult emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &s)
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s)
mlir::LogicalResult emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s)
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s)
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s)
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s)
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
mlir::LogicalResult emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s)
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s)
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s)
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
mlir::LogicalResult emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s)
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s)
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s)
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
mlir::LogicalResult emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s)
mlir::LogicalResult emitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &s)
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
mlir::LogicalResult emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s)
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s)
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s)
mlir::LogicalResult emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s)
mlir::LogicalResult emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s)
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &s)
mlir::LogicalResult emitOMPInterchangeDirective(const OMPInterchangeDirective &s)
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s)
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s, cxxTryBodyEmitter &bodyCallback)
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s)
mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s)
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s)
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s)
Address returnValue
The temporary alloca to hold the return value.
mlir::LogicalResult emitOMPTargetDataDirective(const OMPTargetDataDirective &s)
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &s)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::LogicalResult emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s)
mlir::LogicalResult emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s)
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s)
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s)
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &s)
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s)
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::LogicalResult emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s)
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
mlir::LogicalResult emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s)
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s)
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s)
mlir::LogicalResult emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s)
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s)
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s)
mlir::LogicalResult emitOMPSplitDirective(const OMPSplitDirective &s)
llvm::DenseMap< const VarDecl *, mlir::Value > nrvoFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
mlir::LogicalResult emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
mlir::LogicalResult emitOMPParallelForDirective(const OMPParallelForDirective &s)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
mlir::LogicalResult emitOMPDistributeDirective(const OMPDistributeDirective &s)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &s)
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
mlir::LogicalResult emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s)
void terminateStructuredRegionBody(mlir::Region &r, mlir::Location loc)
clang::ASTContext & getContext() const
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &s)
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s)
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &s)
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s)
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s)
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Address getAggregateAddress() const
Return the value of the address of the aggregate.
Definition CIRGenValue.h:69
bool isAggregate() const
Definition CIRGenValue.h:51
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
CaseStmt - Represent a case statement.
Definition Stmt.h:1926
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1746
ContinueStmt - This represents a continue.
Definition Stmt.h:3125
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1637
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2838
This represents one expression.
Definition Expr.h:112
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
QualType getType() const
Definition Expr.h:144
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2894
GotoStmt - This represents a direct goto.
Definition Stmt.h:2975
IfStmt - This represents an if/then/else.
Definition Stmt.h:2265
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3014
Represents the declaration of a label.
Definition Decl.h:524
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.h:554
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2152
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A (possibly-)qualified type.
Definition TypeBase.h:937
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3166
Stmt - This represents one statement.
Definition Stmt.h:86
@ NoStmtClass
Definition Stmt.h:89
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2515
bool isVoidType() const
Definition TypeBase.h:9034
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2703
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
const internal::VariadicDynCastAllOfMatcher< Stmt, CompoundStmt > compoundStmt
Matches compound statements.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::VariadicDynCastAllOfMatcher< Stmt, SwitchCase > switchCase
Matches case and default statements inside switch statements.
const internal::VariadicAllOfMatcher< Stmt > stmt
Matches statements.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1761
static bool aggValueSlotGC()
static bool loopInfoStack()
static bool emitCondLikelihoodViaExpectIntrinsic()
static bool constantFoldSwitchStatement()
static bool insertBuiltinUnpredictable()
static bool generateDebugInfo()
static bool loopSpecificCleanupHandling()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...