clang 23.0.0git
CIRGenStmt.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Stmt nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
14#include "CIRGenFunction.h"
15
16#include "mlir/IR/Builders.h"
17#include "mlir/IR/Location.h"
18#include "mlir/Support/LLVM.h"
19#include "clang/AST/ExprCXX.h"
20#include "clang/AST/Stmt.h"
24
25using namespace clang;
26using namespace clang::CIRGen;
27using namespace cir;
28
29static mlir::LogicalResult emitStmtWithResult(CIRGenFunction &cgf,
30 const Stmt *exprResult,
31 AggValueSlot slot,
32 Address *lastValue) {
33 // We have to special case labels here. They are statements, but when put
34 // at the end of a statement expression, they yield the value of their
35 // subexpression. Handle this by walking through all labels we encounter,
36 // emitting them before we evaluate the subexpr.
37 // Similar issues arise for attributed statements.
38 while (!isa<Expr>(exprResult)) {
39 if (const auto *ls = dyn_cast<LabelStmt>(exprResult)) {
40 if (cgf.emitLabel(*ls->getDecl()).failed())
41 return mlir::failure();
42 exprResult = ls->getSubStmt();
43 } else if (const auto *as = dyn_cast<AttributedStmt>(exprResult)) {
44 // FIXME: Update this if we ever have attributes that affect the
45 // semantics of an expression.
46 exprResult = as->getSubStmt();
47 } else {
48 llvm_unreachable("Unknown value statement");
49 }
50 }
51
52 const Expr *e = cast<Expr>(exprResult);
53 QualType exprTy = e->getType();
54 if (cgf.hasAggregateEvaluationKind(exprTy)) {
55 cgf.emitAggExpr(e, slot);
56 } else {
57 // We can't return an RValue here because there might be cleanups at
58 // the end of the StmtExpr. Because of that, we have to emit the result
59 // here into a temporary alloca.
60 cgf.emitAnyExprToMem(e, *lastValue, Qualifiers(),
61 /*IsInit*/ false);
62 }
63
64 return mlir::success();
65}
66
// NOTE(review): the line naming this function is missing from this excerpt
// (doxygen scrape); parameters continue below — verify against upstream.
    const CompoundStmt &s, Address *lastValue, AggValueSlot slot) {
  mlir::LogicalResult result = mlir::success();
  // The trailing statement is the one whose value becomes the result of a
  // GNU statement expression, when the caller requested one.
  const Stmt *exprResult = s.body_back();
  assert((!lastValue || (lastValue && exprResult)) &&
         "If lastValue is not null then the CompoundStmt must have a "
         "StmtExprResult");

  for (const Stmt *curStmt : s.body()) {
    // Only the last statement may produce the statement-expression value.
    const bool saveResult = lastValue && exprResult == curStmt;
    if (saveResult) {
      if (emitStmtWithResult(*this, exprResult, slot, lastValue).failed())
        result = mlir::failure();
    } else {
      if (emitStmt(curStmt, /*useCurrentScope=*/false).failed())
        result = mlir::failure();
    }
  }
  return result;
}
87
88mlir::LogicalResult
90 for (const Attr *attr : s.getAttrs()) {
91 switch (attr->getKind()) {
92 default:
93 break;
94 case attr::NoMerge:
95 case attr::NoInline:
96 case attr::AlwaysInline:
97 case attr::NoConvergent:
98 case attr::MustTail:
99 case attr::Atomic:
100 case attr::HLSLControlFlowHint:
101 cgm.errorNYI(s.getSourceRange(),
102 "Unimplemented statement attribute: ", attr->getKind());
103 break;
104 case attr::CXXAssume: {
105 const Expr *assumptionExpr = cast<CXXAssumeAttr>(attr)->getAssumption();
106 if (getLangOpts().CXXAssumptions && builder.getInsertionBlock() &&
107 !assumptionExpr->HasSideEffects(getContext())) {
108 mlir::Value assumptionValue = emitCheckedArgForAssume(assumptionExpr);
109 cir::AssumeOp::create(builder, getLoc(s.getSourceRange()),
110 assumptionValue);
111 }
112 } break;
113 }
114 }
115
116 return emitStmt(s.getSubStmt(), /*useCurrentScope=*/true, s.getAttrs());
117}
118
// NOTE(review): the line naming this function is missing from this excerpt
// (doxygen scrape); parameters continue below — verify against upstream.
                                                    Address *lastValue,
                                                    AggValueSlot slot) {
  // Add local scope to track new declared variables.
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  mlir::OpBuilder::InsertPoint scopeInsPt;
  // Create the cir.scope first and capture its insertion point; the body is
  // emitted afterwards so nested emission sees a fully linked region.
  cir::ScopeOp::create(
      builder, scopeLoc,
      [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
        scopeInsPt = b.saveInsertionPoint();
      });
  mlir::OpBuilder::InsertionGuard guard(builder);
  builder.restoreInsertionPoint(scopeInsPt);
  LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
  return emitCompoundStmtWithoutScope(s, lastValue, slot);
}
136
140
// Build CIR for a statement. useCurrentScope should be true if no new scopes
// need to be created when finding a compound statement.
//
// NOTE(review): this excerpt (doxygen scrape) is missing many lines inside
// this function — the final parameter of the signature and most of the
// `return emit...` / `cgm.errorNYI(...)` lines that followed the case
// labels below. Verify the full dispatch against upstream CIRGenStmt.cpp.
mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
                                             bool useCurrentScope,
  // First try the dedicated "simple" emitters (decl, compound, return, ...).
  if (mlir::succeeded(emitSimpleStmt(s, useCurrentScope)))
    return mlir::success();

  switch (s->getStmtClass()) {
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
  case Stmt::UnresolvedSYCLKernelCallStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::BreakStmtClass:
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::ReturnStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

// Expand a case label for every concrete expression class; expressions are
// emitted for their side effects when they appear in statement position.
#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    assert(builder.getInsertionBlock() &&
           "expression emission must have an insertion point");

    // Classic codegen has a check here to see if the emitter created a new
    // block that isn't used (comparing the incoming and outgoing insertion
    // points) and deletes the outgoing block if it's not used. In CIR, we
    // will handle that during the cir.canonicalize pass.
    return mlir::success();
  }
  case Stmt::IfStmtClass:
    return emitIfStmt(cast<IfStmt>(*s));
  case Stmt::SwitchStmtClass:
  case Stmt::ForStmtClass:
    return emitForStmt(cast<ForStmt>(*s));
  case Stmt::WhileStmtClass:
  case Stmt::DoStmtClass:
    return emitDoStmt(cast<DoStmt>(*s));
  case Stmt::CXXTryStmtClass:
  case Stmt::CXXForRangeStmtClass:
  case Stmt::CoroutineBodyStmtClass:
  case Stmt::IndirectGotoStmtClass:
  case Stmt::CoreturnStmtClass:
  case Stmt::OpenACCComputeConstructClass:
  case Stmt::OpenACCLoopConstructClass:
  case Stmt::OpenACCCombinedConstructClass:
  case Stmt::OpenACCDataConstructClass:
  case Stmt::OpenACCEnterDataConstructClass:
  case Stmt::OpenACCExitDataConstructClass:
  case Stmt::OpenACCHostDataConstructClass:
  case Stmt::OpenACCWaitConstructClass:
  case Stmt::OpenACCInitConstructClass:
  case Stmt::OpenACCShutdownConstructClass:
  case Stmt::OpenACCSetConstructClass:
  case Stmt::OpenACCUpdateConstructClass:
  case Stmt::OpenACCCacheConstructClass:
  case Stmt::OpenACCAtomicConstructClass:
  case Stmt::GCCAsmStmtClass:
  case Stmt::MSAsmStmtClass:
    return emitAsmStmt(cast<AsmStmt>(*s));
  case Stmt::OMPScopeDirectiveClass:
  case Stmt::OMPErrorDirectiveClass:
  case Stmt::OMPParallelDirectiveClass:
  case Stmt::OMPTaskwaitDirectiveClass:
  case Stmt::OMPTaskyieldDirectiveClass:
  case Stmt::OMPBarrierDirectiveClass:
  case Stmt::OMPMetaDirectiveClass:
  case Stmt::OMPCanonicalLoopClass:
  case Stmt::OMPSimdDirectiveClass:
  case Stmt::OMPTileDirectiveClass:
  case Stmt::OMPUnrollDirectiveClass:
  case Stmt::OMPFuseDirectiveClass:
  case Stmt::OMPForDirectiveClass:
  case Stmt::OMPForSimdDirectiveClass:
  case Stmt::OMPSectionsDirectiveClass:
  case Stmt::OMPSectionDirectiveClass:
  case Stmt::OMPSingleDirectiveClass:
  case Stmt::OMPMasterDirectiveClass:
  case Stmt::OMPCriticalDirectiveClass:
  case Stmt::OMPParallelForDirectiveClass:
  case Stmt::OMPParallelForSimdDirectiveClass:
  case Stmt::OMPParallelMasterDirectiveClass:
  case Stmt::OMPParallelSectionsDirectiveClass:
  case Stmt::OMPTaskDirectiveClass:
  case Stmt::OMPTaskgroupDirectiveClass:
  case Stmt::OMPFlushDirectiveClass:
  case Stmt::OMPDepobjDirectiveClass:
  case Stmt::OMPScanDirectiveClass:
  case Stmt::OMPOrderedDirectiveClass:
  case Stmt::OMPAtomicDirectiveClass:
  case Stmt::OMPTargetDirectiveClass:
  case Stmt::OMPTeamsDirectiveClass:
  case Stmt::OMPCancellationPointDirectiveClass:
  case Stmt::OMPCancelDirectiveClass:
  case Stmt::OMPTargetDataDirectiveClass:
  case Stmt::OMPTargetEnterDataDirectiveClass:
  case Stmt::OMPTargetExitDataDirectiveClass:
  case Stmt::OMPTargetParallelDirectiveClass:
  case Stmt::OMPTargetParallelForDirectiveClass:
  case Stmt::OMPTaskLoopDirectiveClass:
  case Stmt::OMPTaskLoopSimdDirectiveClass:
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
  case Stmt::OMPMasterTaskLoopDirectiveClass:
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
  case Stmt::OMPParallelGenericLoopDirectiveClass:
  case Stmt::OMPParallelMaskedDirectiveClass:
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
  case Stmt::OMPDistributeDirectiveClass:
  case Stmt::OMPDistributeParallelForDirectiveClass:
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
  case Stmt::OMPDistributeSimdDirectiveClass:
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
  case Stmt::OMPTargetSimdDirectiveClass:
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
  case Stmt::OMPTargetUpdateDirectiveClass:
  case Stmt::OMPTeamsDistributeDirectiveClass:
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
  case Stmt::OMPTargetTeamsDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
  case Stmt::OMPInteropDirectiveClass:
  case Stmt::OMPDispatchDirectiveClass:
  case Stmt::OMPGenericLoopDirectiveClass:
  case Stmt::OMPReverseDirectiveClass:
  case Stmt::OMPInterchangeDirectiveClass:
  case Stmt::OMPAssumeDirectiveClass:
  case Stmt::OMPMaskedDirectiveClass:
  case Stmt::OMPStripeDirectiveClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
  case Stmt::SYCLKernelCallStmtClass:
  case Stmt::CapturedStmtClass:
  case Stmt::ObjCAtTryStmtClass:
  case Stmt::ObjCAtThrowStmtClass:
  case Stmt::ObjCAtSynchronizedStmtClass:
  case Stmt::ObjCForCollectionStmtClass:
  case Stmt::ObjCAutoreleasePoolStmtClass:
  case Stmt::SEHTryStmtClass:
  case Stmt::ObjCAtCatchStmtClass:
  case Stmt::ObjCAtFinallyStmtClass:
  case Stmt::DeferStmtClass:
    // Statement classes that have no CIR emission yet: report and fail.
    cgm.errorNYI(s->getSourceRange(),
                 std::string("emitStmt: ") + s->getStmtClassName());
    return mlir::failure();
  }

  llvm_unreachable("Unexpected statement class");
}
438
// Emit statements that have a direct, single-op emission path. Returns
// failure for statement classes that must go through the generic emitStmt
// dispatch instead.
//
// NOTE(review): this excerpt (doxygen scrape) is missing the call lines
// that followed several of the case labels below (compound, continue,
// label, break, return, attributed) — verify against upstream.
mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *s,
                                                   bool useCurrentScope) {
  switch (s->getStmtClass()) {
  default:
    // Not a "simple" statement; the caller falls back to the big switch.
    return mlir::failure();
  case Stmt::DeclStmtClass:
    return emitDeclStmt(cast<DeclStmt>(*s));
  case Stmt::CompoundStmtClass:
    if (useCurrentScope)
  case Stmt::GotoStmtClass:
    return emitGotoStmt(cast<GotoStmt>(*s));
  case Stmt::ContinueStmtClass:

  // NullStmt doesn't need any handling, but we need to say we handled it.
  case Stmt::NullStmtClass:
    break;

  case Stmt::LabelStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::DefaultStmtClass:
    // If we reached here, we must not handling a switch case in the top level.
                   /*buildingTopLevelCase=*/false);
    break;

  case Stmt::BreakStmtClass:
  case Stmt::ReturnStmtClass:
  case Stmt::AttributedStmtClass:
  }

  return mlir::success();
}
478
480
  // NOTE(review): the signature line of this function is missing from this
  // excerpt (doxygen scrape) — verify against upstream.
  // Attach the label to the current position, then emit the labeled
  // sub-statement in the current scope.
  if (emitLabel(*s.getDecl()).failed())
    return mlir::failure();

  // SEH side-entry labels under async exception handling are not supported.
  if (getContext().getLangOpts().EHAsynch && s.isSideEntry())
    getCIRGenModule().errorNYI(s.getSourceRange(), "IsEHa: not implemented.");

  return emitStmt(s.getSubStmt(), /*useCurrentScope*/ true);
}
489
// Add a terminating yield on a body region if no other terminators are used.
static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r,
                          mlir::Location loc) {
  if (r.empty())
    return;

  // NOTE(review): the declaration of `eraseBlocks` (a small vector of
  // mlir::Block*) is missing from this excerpt — verify against upstream.
  unsigned numBlocks = r.getBlocks().size();
  for (auto &block : r.getBlocks()) {
    // Already cleanup after return operations, which might create
    // empty blocks if emitted as last stmt.
    if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() &&
        block.hasNoSuccessors())
      eraseBlocks.push_back(&block);

    // Any block that does not end in a terminator gets a trailing cir.yield.
    if (block.empty() ||
        !block.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
      mlir::OpBuilder::InsertionGuard guardCase(builder);
      builder.setInsertionPointToEnd(&block);
      builder.createYield(loc);
    }
  }

  // Erase the dead blocks collected above after iteration is complete.
  for (auto *b : eraseBlocks)
    b->erase();
}
516
// Emit an if statement as a cir.if nested inside a cir.scope that covers the
// init-statement and condition variable.
mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &s) {
  mlir::LogicalResult res = mlir::success();
  // The else branch of a consteval if statement is always the only branch
  // that can be runtime evaluated.
  const Stmt *constevalExecuted;
  if (s.isConsteval()) {
    constevalExecuted = s.isNegatedConsteval() ? s.getThen() : s.getElse();
    if (!constevalExecuted) {
      // No runtime code execution required
      return res;
    }
  }

  // C99 6.8.4.1: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  auto ifStmtBuilder = [&]() -> mlir::LogicalResult {
    if (s.isConsteval())
      return emitStmt(constevalExecuted, /*useCurrentScope=*/true);

    // Emit the init-statement (C++17 `if (init; cond)`), if present.
    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();

    if (s.getConditionVariable())
      emitDecl(*s.getConditionVariable());

    // If the condition folds to a constant and this is an 'if constexpr',
    // we simplify it early in CIRGen to avoid emitting the full 'if'.
    bool condConstant;
    if (constantFoldsToBool(s.getCond(), condConstant, s.isConstexpr())) {
      if (s.isConstexpr()) {
        // Handle "if constexpr" explicitly here to avoid generating some
        // ill-formed code since in CIR the "if" is no longer simplified
        // in this lambda like in Clang but postponed to other MLIR
        // passes.
        if (const Stmt *executed = condConstant ? s.getThen() : s.getElse())
          return emitStmt(executed, /*useCurrentScope=*/true);
        // There is nothing to execute at runtime.
        // TODO(cir): there is still an empty cir.scope generated by the caller.
        return mlir::success();
      }
    }

    // NOTE(review): one or two lines are missing here in this excerpt
    // (doxygen scrape) — verify against upstream.
    return emitIfOnBoolExpr(s.getCond(), s.getThen(), s.getElse());
  };

  // TODO: Add a new scoped symbol table.
  // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  // The if scope contains the full source range for IfStmt.
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         LexicalScope lexScope{*this, scopeLoc,
                                               builder.getInsertionBlock()};
                         res = ifStmtBuilder();
                       });

  return res;
}
578
579mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &s) {
580 assert(builder.getInsertionBlock() && "expected valid insertion point");
581
582 for (const Decl *i : s.decls())
583 emitDecl(*i, /*evaluateConditionDecl=*/true);
584
585 return mlir::success();
586}
587
// Emit a return statement: run cleanups, store the (optional) return value
// into the function's __retval alloca, then emit cir.return.
//
// NOTE(review): this excerpt (doxygen scrape) is missing several lines in
// the body below (the reference-binding result computation, the evaluation-
// kind switch header, and the complex/aggregate arms) — verify against
// upstream CIRGenStmt.cpp.
mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
  mlir::Location loc = getLoc(s.getSourceRange());
  const Expr *rv = s.getRetValue();

  RunCleanupsScope cleanupScope(*this);
  bool createNewScope = false;
  // Peel ExprWithCleanups: its cleanups need their own scope around the
  // return-value emission.
  if (const auto *ewc = dyn_cast_or_null<ExprWithCleanups>(rv)) {
    rv = ewc->getSubExpr();
    createNewScope = true;
  }

  auto handleReturnVal = [&]() {
    if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
        s.getNRVOCandidate()->isNRVOVariable()) {
      // Apply the named return value optimization for this return statement,
      // which means doing nothing: the appropriate result has already been
      // constructed into the NRVO variable.

      // If there is an NRVO flag for this variable, set it to 1 into indicate
      // that the cleanup code should not destroy the variable.
      if (auto nrvoFlag = nrvoFlags[s.getNRVOCandidate()])
        builder.createFlagStore(loc, true, nrvoFlag);
    } else if (!rv) {
      // No return expression. Do nothing.
    } else if (rv->getType()->isVoidType()) {
      // Make sure not to return anything, but evaluate the expression
      // for side effects.
      if (rv) {
        emitAnyExpr(rv);
      }
    } else if (cast<FunctionDecl>(curGD.getDecl())
                   ->getReturnType()
                   ->isReferenceType()) {
      // If this function returns a reference, take the address of the
      // expression rather than the value.
      // NOTE(review): the line computing `result` is missing from this
      // excerpt.
      builder.CIRBaseBuilderTy::createStore(loc, result.getValue(),
                                            *fnRetAlloca);
    } else {
      mlir::Value value = nullptr;
      // NOTE(review): the evaluation-kind switch header is missing from this
      // excerpt.
      case cir::TEK_Scalar:
        value = emitScalarExpr(rv);
        if (value) { // Change this to an assert once emitScalarExpr is complete
          builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
        }
        break;
      case cir::TEK_Complex:
        /*isInit=*/true);
        break;
        break;
      }
    }
  };

  if (!createNewScope) {
    handleReturnVal();
  } else {
    mlir::Location scopeLoc =
        getLoc(rv ? rv->getSourceRange() : s.getSourceRange());
    // First create cir.scope and later emit it's body. Otherwise all CIRGen
    // dispatched by `handleReturnVal()` might needs to manipulate blocks and
    // look into parents, which are all unlinked.
    mlir::OpBuilder::InsertPoint scopeBody;
    cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                         [&](mlir::OpBuilder &b, mlir::Location loc) {
                           scopeBody = b.saveInsertionPoint();
                         });
    {
      mlir::OpBuilder::InsertionGuard guard(builder);
      builder.restoreInsertionPoint(scopeBody);
      CIRGenFunction::LexicalScope lexScope{*this, scopeLoc,
                                            builder.getInsertionBlock()};
      handleReturnVal();
    }
  }

  cleanupScope.forceCleanup();

  // Classic codegen emits a branch through any cleanups before continuing to
  // a shared return block. Because CIR handles branching through cleanups
  // during the CFG flattening phase, we can just emit the return statement
  // directly.
  // TODO(cir): Eliminate this redundant load and the store above when we can.
  if (fnRetAlloca) {
    // Load the value from `__retval` and return it via the `cir.return` op.
    cir::AllocaOp retAlloca =
        mlir::cast<cir::AllocaOp>(fnRetAlloca->getDefiningOp());
    auto value = cir::LoadOp::create(builder, loc, retAlloca.getAllocaType(),
                                     *fnRetAlloca);

    cir::ReturnOp::create(builder, loc, {value});
  } else {
    cir::ReturnOp::create(builder, loc);
  }

  // Insert the new block to continue codegen after the return statement.
  // This will get deleted if we don't populate it. This handles the case of
  // unreachable statements below a return.
  builder.createBlock(builder.getBlock()->getParent());
  return mlir::success();
}
699
// Emit an unconditional goto as a cir.goto referring to the label by name;
// label resolution happens later, during CFG flattening.
mlir::LogicalResult CIRGenFunction::emitGotoStmt(const clang::GotoStmt &s) {
  // FIXME: LLVM codegen inserts emit a stop point here for debug info
  // sake when the insertion point is available, but doesn't do
  // anything special when there isn't. We haven't implemented debug
  // info support just yet, look at this again once we have it.

  cir::GotoOp::create(builder, getLoc(s.getSourceRange()),
                      s.getLabel()->getName());

  // A goto marks the end of a block, create a new one for codegen after
  // emitGotoStmt can resume building in that block.
  // Insert the new block to continue codegen after goto.
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}
717
// Emit `goto *expr`: evaluate the target address and branch to the shared
// indirect-goto dispatch block with it.
mlir::LogicalResult
// NOTE(review): the line naming this function is missing from this excerpt
// (doxygen scrape) — verify against upstream.
  mlir::Value val = emitScalarExpr(s.getTarget());
  assert(indirectGotoBlock &&
         "If you jumping to a indirect branch should be alareadye emitted");
  cir::BrOp::create(builder, getLoc(s.getSourceRange()), indirectGotoBlock,
                    val);
  // Indirect goto terminates the block; open a fresh one for what follows.
  builder.createBlock(builder.getBlock()->getParent());
  return mlir::success();
}
728
// Emit a continue statement as cir.continue.
mlir::LogicalResult
// NOTE(review): the line naming this function is missing from this excerpt
// (doxygen scrape) — verify against upstream.
  builder.createContinue(getLoc(s.getKwLoc()));

  // Insert the new block to continue codegen after the continue statement.
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}
738
mlir::LogicalResult CIRGenFunction::emitLabel(const clang::LabelDecl &d) {
  // Create a new block to tag with a label and add a branch from
  // the current one to it. If the block is empty just attach it
  // to this label.
  mlir::Block *currBlock = builder.getBlock();
  mlir::Block *labelBlock = currBlock;

  // A non-empty current block (or the entry block, which must not carry a
  // label directly) needs a fresh block for the label target.
  if (!currBlock->empty() || currBlock->isEntryBlock()) {
    {
      mlir::OpBuilder::InsertionGuard guard(builder);
      labelBlock = builder.createBlock(builder.getBlock()->getParent());
    }
    cir::BrOp::create(builder, getLoc(d.getSourceRange()), labelBlock);
  }

  builder.setInsertionPointToEnd(labelBlock);
  cir::LabelOp label =
      cir::LabelOp::create(builder, getLoc(d.getSourceRange()), d.getName());
  builder.setInsertionPointToEnd(labelBlock);
  auto func = cast<cir::FuncOp>(curFn);
  // Register the (function, label) pair so blockaddress references can be
  // resolved to this cir.label.
  cgm.mapBlockAddress(cir::BlockAddrInfoAttr::get(builder.getContext(),
                                                  func.getSymNameAttr(),
                                                  label.getLabelAttr()),
                      label);
  // FIXME: emit debug info for labels, incrementProfileCounter
  // NOTE(review): two lines are missing here in this excerpt (doxygen
  // scrape) — verify against upstream.
  return mlir::success();
}
768
  // NOTE(review): the signature line of this function is missing from this
  // excerpt (doxygen scrape) — verify against upstream.
  // Emit a break statement as cir.break.
  builder.createBreak(getLoc(s.getKwLoc()));

  // Insert the new block to continue codegen after the break statement.
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}
777
// Emit a cir.case for a CaseStmt or DefaultStmt, then handle directly nested
// case/default sub-statements so the resulting cir.switch stays in simple
// (flat) form where possible.
template <typename T>
mlir::LogicalResult
CIRGenFunction::emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
                                       mlir::ArrayAttr value, CaseOpKind kind,
                                       bool buildingTopLevelCase) {

  // NOTE(review): the assert line whose message appears below is missing
  // from this excerpt (doxygen scrape) — verify against upstream.
         "only case or default stmt go here");

  mlir::LogicalResult result = mlir::success();

  mlir::Location loc = getLoc(stmt->getBeginLoc());

  // Classify the sub-statement so nested case/default can be flattened.
  enum class SubStmtKind { Case, Default, Other };
  SubStmtKind subStmtKind = SubStmtKind::Other;
  const Stmt *sub = stmt->getSubStmt();

  mlir::OpBuilder::InsertPoint insertPoint;
  CaseOp::create(builder, loc, value, kind, insertPoint);

  {
    mlir::OpBuilder::InsertionGuard guardSwitch(builder);
    builder.restoreInsertionPoint(insertPoint);

    if (isa<DefaultStmt>(sub) && isa<CaseStmt>(stmt)) {
      // `case N: default: ...` — yield here, emit default as a sibling.
      subStmtKind = SubStmtKind::Default;
      builder.createYield(loc);
    } else if (isa<CaseStmt>(sub) && isa<DefaultStmt, CaseStmt>(stmt)) {
      // `case N: case M: ...` or `default: case M: ...` — same flattening.
      subStmtKind = SubStmtKind::Case;
      builder.createYield(loc);
    } else {
      result = emitStmt(sub, /*useCurrentScope=*/!isa<CompoundStmt>(sub));
    }

    insertPoint = builder.saveInsertionPoint();
  }

  // If the substmt is default stmt or case stmt, try to handle the special case
  // to make it into the simple form. e.g.
  //
  // switch () {
  // case 1:
  // default:
  //   ...
  // }
  //
  // we prefer generating
  //
  // cir.switch() {
  //   cir.case(equal, 1) {
  //     cir.yield
  //   }
  //   cir.case(default) {
  //     ...
  //   }
  // }
  //
  // than
  //
  // cir.switch() {
  //   cir.case(equal, 1) {
  //     cir.case(default) {
  //       ...
  //     }
  //   }
  // }
  //
  // We don't need to revert this if we find the current switch can't be in
  // simple form later since the conversion itself should be harmless.
  if (subStmtKind == SubStmtKind::Case) {
    result = emitCaseStmt(*cast<CaseStmt>(sub), condType, buildingTopLevelCase);
  } else if (subStmtKind == SubStmtKind::Default) {
    result = emitDefaultStmt(*cast<DefaultStmt>(sub), condType,
                             buildingTopLevelCase);
  } else if (buildingTopLevelCase) {
    // If we're building a top level case, try to restore the insert point to
    // the case we're building, then we can attach more random stmts to the
    // case to make generating `cir.switch` operation to be a simple form.
    builder.restoreInsertionPoint(insertPoint);
  }

  return result;
}
861
862mlir::LogicalResult CIRGenFunction::emitCaseStmt(const CaseStmt &s,
863 mlir::Type condType,
864 bool buildingTopLevelCase) {
865 cir::CaseOpKind kind;
866 mlir::ArrayAttr value;
867 llvm::APSInt intVal = s.getLHS()->EvaluateKnownConstInt(getContext());
868
869 // If the case statement has an RHS value, it is representing a GNU
870 // case range statement, where LHS is the beginning of the range
871 // and RHS is the end of the range.
872 if (const Expr *rhs = s.getRHS()) {
873 llvm::APSInt endVal = rhs->EvaluateKnownConstInt(getContext());
874 value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal),
875 cir::IntAttr::get(condType, endVal)});
876 kind = cir::CaseOpKind::Range;
877 } else {
878 value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal)});
879 kind = cir::CaseOpKind::Equal;
880 }
881
882 return emitCaseDefaultCascade(&s, condType, value, kind,
883 buildingTopLevelCase);
884}
885
// NOTE(review): the line naming this function is missing from this excerpt
// (doxygen scrape); parameters continue below — verify against upstream.
// A default label is a cir.case of Default kind with no values.
                                                    mlir::Type condType,
                                                    bool buildingTopLevelCase) {
  return emitCaseDefaultCascade(&s, condType, builder.getArrayAttr({}),
                                cir::CaseOpKind::Default, buildingTopLevelCase);
}
892
// Dispatch a SwitchCase (case or default label) to the appropriate emitter,
// using the condition type recorded by the enclosing switch.
//
// NOTE(review): the call lines that followed the two `if` conditions below
// are missing from this excerpt (doxygen scrape) — verify against upstream.
mlir::LogicalResult CIRGenFunction::emitSwitchCase(const SwitchCase &s,
                                                   bool buildingTopLevelCase) {
  assert(!condTypeStack.empty() &&
         "build switch case without specifying the type of the condition");

  if (s.getStmtClass() == Stmt::CaseStmtClass)
                  buildingTopLevelCase);

  if (s.getStmtClass() == Stmt::DefaultStmtClass)
                  buildingTopLevelCase);

  llvm_unreachable("expect case or default stmt");
}
908
// Emit a C++ range-based for loop as a cir.for inside a cir.scope. The
// range/begin/end statements are emitted before the loop; the loop variable
// is (re)emitted at the top of each body iteration.
mlir::LogicalResult
// NOTE(review): the line naming this function is missing from this excerpt
// (doxygen scrape); parameters continue below — verify against upstream.
                                     ArrayRef<const Attr *> forAttrs) {
  cir::ForOp forOp;

  // TODO(cir): pass in array of attributes.
  auto forStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();
    // Evaluate the first pieces before the loop.
    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();
    if (emitStmt(s.getRangeStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();
    if (emitStmt(s.getBeginStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();
    if (emitStmt(s.getEndStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();

    // From LLVM: if there are any cleanups between here and the loop-exit
    // scope, create a block to stage a loop exit along.
    // We probably already do the right thing because of ScopeOp, but make
    // sure we handle all cases.

    forOp = builder.createFor(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          mlir::Value condVal = evaluateExprAsBool(s.getCond());
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // https://en.cppreference.com/w/cpp/language/for
          // In C++ the scope of the init-statement and the scope of
          // statement are one and the same.
          bool useCurrentScope = true;
          if (emitStmt(s.getLoopVarStmt(), useCurrentScope).failed())
            loopRes = mlir::failure();
          if (emitStmt(s.getBody(), useCurrentScope).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        },
        /*stepBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          if (s.getInc())
            if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
              loopRes = mlir::failure();
          builder.createYield(loc);
        });
    return loopRes;
  };

  mlir::LogicalResult res = mlir::success();
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         // Create a cleanup scope for the condition
                         // variable cleanups. Logical equivalent from
                         // LLVM codegen for LexicalScope
                         // ConditionScope(*this, S.getSourceRange())...
                         LexicalScope lexScope{*this, loc,
                                               builder.getInsertionBlock()};
                         res = forStmtBuilder();
                       });

  if (res.failed())
    return res;

  // Add a trailing cir.yield to any body block lacking a terminator.
  terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}
985
// Emit a C/C++ for statement as a cir.for (cond/body/step regions) nested in
// a cir.scope that owns the init-statement and condition variable.
mlir::LogicalResult CIRGenFunction::emitForStmt(const ForStmt &s) {
  cir::ForOp forOp;

  // TODO: pass in an array of attributes.
  auto forStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();
    // Evaluate the first part before the loop.
    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();
    // In the classic codegen, if there are any cleanups between here and the
    // loop-exit scope, a block is created to stage the loop exit. We probably
    // already do the right thing because of ScopeOp, but we need more testing
    // to be sure we handle all cases.

    forOp = builder.createFor(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          mlir::Value condVal;
          if (s.getCond()) {
            // If the for statement has a condition scope,
            // emit the local variable declaration.
            if (s.getConditionVariable())
              emitDecl(*s.getConditionVariable());
            // C99 6.8.5p2/p4: The first substatement is executed if the
            // expression compares unequal to 0. The condition must be a
            // scalar type.
            condVal = evaluateExprAsBool(s.getCond());
          } else {
            // No condition means an infinite loop: condition is constant true.
            condVal = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
          }
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // The scope of the for loop body is nested within the scope of the
          // for loop's init-statement and condition.
          if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        },
        /*stepBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          if (s.getInc())
            if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
              loopRes = mlir::failure();
          builder.createYield(loc);
        });
    return loopRes;
  };

  auto res = mlir::success();
  auto scopeLoc = getLoc(s.getSourceRange());
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         LexicalScope lexScope{*this, loc,
                                               builder.getInsertionBlock()};
                         res = forStmtBuilder();
                       });

  if (res.failed())
    return res;

  // Add a trailing cir.yield to any body block lacking a terminator.
  terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}
1057
/// Emit a C/C++ do-while statement as a cir.do-while operation nested inside
/// a cir.scope. The scope provides the lexical scope for the statement; the
/// do-while op carries separate regions for the condition and the body.
mlir::LogicalResult CIRGenFunction::emitDoStmt(const DoStmt &s) {
  cir::DoWhileOp doWhileOp;

  // TODO: pass in array of attributes.
  // Deferred so it runs inside the LexicalScope created below; it writes
  // doWhileOp (captured by reference) for the terminateBody call afterwards.
  auto doStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();
    // From LLVM: if there are any cleanups between here and the loop-exit
    // scope, create a block to stage a loop exit along.
    // We probably already do the right thing because of ScopeOp, but make
    // sure we handle all cases.

    doWhileOp = builder.createDoWhile(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          // C99 6.8.5p2/p4: The first substatement is executed if the
          // expression compares unequal to 0. The condition must be a
          // scalar type.
          mlir::Value condVal = evaluateExprAsBool(s.getCond());
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // The scope of the do-while loop body is a nested scope.
          if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        });
    // Failures inside the region builders are recorded in loopRes rather
    // than returned directly, since the builder callbacks return void.
    return loopRes;
  };

  mlir::LogicalResult res = mlir::success();
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         LexicalScope lexScope{*this, loc,
                                               builder.getInsertionBlock()};
                         res = doStmtBuilder();
                       });

  if (res.failed())
    return res;

  // Ensure the body region ends with a terminator (e.g. an implicit yield).
  terminateBody(builder, doWhileOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}
1108
/// Emit a C/C++ while statement as a cir.while operation nested inside a
/// cir.scope. The scope provides the lexical scope for the statement; the
/// while op carries separate regions for the condition and the body.
mlir::LogicalResult CIRGenFunction::emitWhileStmt(const WhileStmt &s) {
  cir::WhileOp whileOp;

  // TODO: pass in array of attributes.
  // Deferred so it runs inside the LexicalScope created below; it writes
  // whileOp (captured by reference) for the terminateBody call afterwards.
  auto whileStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();
    // From LLVM: if there are any cleanups between here and the loop-exit
    // scope, create a block to stage a loop exit along.
    // We probably already do the right thing because of ScopeOp, but make
    // sure we handle all cases.

    whileOp = builder.createWhile(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          mlir::Value condVal;
          // If the while statement has a condition variable,
          // emit the local variable declaration.
          if (s.getConditionVariable())
            emitDecl(*s.getConditionVariable());
          // C99 6.8.5p2/p4: The first substatement is executed if the
          // expression compares unequal to 0. The condition must be a
          // scalar type.
          condVal = evaluateExprAsBool(s.getCond());
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // The scope of the while loop body is a nested scope.
          if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        });
    // Failures inside the region builders are recorded in loopRes rather
    // than returned directly, since the builder callbacks return void.
    return loopRes;
  };

  mlir::LogicalResult res = mlir::success();
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         LexicalScope lexScope{*this, loc,
                                               builder.getInsertionBlock()};
                         res = whileStmtBuilder();
                       });

  if (res.failed())
    return res;

  // Ensure the body region ends with a terminator (e.g. an implicit yield).
  terminateBody(builder, whileOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}
1164
1165mlir::LogicalResult CIRGenFunction::emitSwitchBody(const Stmt *s) {
1166 // It is rare but legal if the switch body is not a compound stmt. e.g.,
1167 //
1168 // switch(a)
1169 // while(...) {
1170 // case1
1171 // ...
1172 // case2
1173 // ...
1174 // }
1175 if (!isa<CompoundStmt>(s))
1176 return emitStmt(s, /*useCurrentScope=*/true);
1177
1179
1180 mlir::Block *swtichBlock = builder.getBlock();
1181 for (auto *c : compoundStmt->body()) {
1182 if (auto *switchCase = dyn_cast<SwitchCase>(c)) {
1183 builder.setInsertionPointToEnd(swtichBlock);
1184 // Reset insert point automatically, so that we can attach following
1185 // random stmt to the region of previous built case op to try to make
1186 // the being generated `cir.switch` to be in simple form.
1187 if (mlir::failed(
1188 emitSwitchCase(*switchCase, /*buildingTopLevelCase=*/true)))
1189 return mlir::failure();
1190
1191 continue;
1192 }
1193
1194 // Otherwise, just build the statements in the nearest case region.
1195 if (mlir::failed(emitStmt(c, /*useCurrentScope=*/!isa<CompoundStmt>(c))))
1196 return mlir::failure();
1197 }
1198
1199 return mlir::success();
1200}
1201
1203 // TODO: LLVM codegen does some early optimization to fold the condition and
1204 // only emit live cases. CIR should use MLIR to achieve similar things,
1205 // nothing to be done here.
1206 // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))...
1208
1209 SwitchOp swop;
1210 auto switchStmtBuilder = [&]() -> mlir::LogicalResult {
1211 if (s.getInit())
1212 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
1213 return mlir::failure();
1214
1215 if (s.getConditionVariable())
1216 emitDecl(*s.getConditionVariable(), /*evaluateConditionDecl=*/true);
1217
1218 mlir::Value condV = emitScalarExpr(s.getCond());
1219
1220 // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts())
1223 // TODO: if the switch has a condition wrapped by __builtin_unpredictable?
1225
1226 mlir::LogicalResult res = mlir::success();
1227 swop = SwitchOp::create(
1228 builder, getLoc(s.getBeginLoc()), condV,
1229 /*switchBuilder=*/
1230 [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) {
1231 curLexScope->setAsSwitch();
1232
1233 condTypeStack.push_back(condV.getType());
1234
1235 res = emitSwitchBody(s.getBody());
1236
1237 condTypeStack.pop_back();
1238 });
1239
1240 return res;
1241 };
1242
1243 // The switch scope contains the full source range for SwitchStmt.
1244 mlir::Location scopeLoc = getLoc(s.getSourceRange());
1245 mlir::LogicalResult res = mlir::success();
1246 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
1247 [&](mlir::OpBuilder &b, mlir::Location loc) {
1248 LexicalScope lexScope{*this, loc,
1249 builder.getInsertionBlock()};
1250 res = switchStmtBuilder();
1251 });
1252
1254 swop.collectCases(cases);
1255 for (auto caseOp : cases)
1256 terminateBody(builder, caseOp.getCaseRegion(), caseOp.getLoc());
1257 terminateBody(builder, swop.getBody(), swop.getLoc());
1258
1259 swop.setAllEnumCasesCovered(s.isAllEnumCasesCovered());
1260
1261 return res;
1262}
1263
1264void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue rv,
1265 QualType ty) {
1266 if (rv.isScalar()) {
1267 builder.createStore(loc, rv.getValue(), returnValue);
1268 } else if (rv.isAggregate()) {
1269 LValue dest = makeAddrLValue(returnValue, ty);
1272 } else {
1273 cgm.errorNYI(loc, "emitReturnOfRValue: complex return type");
1274 }
1275
1276 // Classic codegen emits a branch through any cleanups before continuing to
1277 // a shared return block. Because CIR handles branching through cleanups
1278 // during the CFG flattening phase, we can just emit the return statement
1279 // directly.
1280 // TODO(cir): Eliminate this redundant load and the store above when we can.
1281 // Load the value from `__retval` and return it via the `cir.return` op.
1282 cir::AllocaOp retAlloca =
1283 mlir::cast<cir::AllocaOp>(fnRetAlloca->getDefiningOp());
1284 auto value = cir::LoadOp::create(builder, loc, retAlloca.getAllocaType(),
1285 *fnRetAlloca);
1286
1287 cir::ReturnOp::create(builder, loc, {value});
1288}
static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r, mlir::Location loc)
static mlir::LogicalResult emitStmtWithResult(CIRGenFunction &cgf, const Stmt *exprResult, AggValueSlot slot, Address *lastValue)
Defines the clang::Expr interface and subclasses for C++ expressions.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
This file defines OpenACC AST classes for statement-level constructs.
This file defines OpenMP AST classes for executable directives and clauses.
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
Attr - This represents one attribute.
Definition Attr.h:46
Represents an attribute applied to a statement.
Definition Stmt.h:2195
BreakStmt - This represents a break.
Definition Stmt.h:3127
An aggregate value slot.
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
mlir::LogicalResult emitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPSimdDirective(const OMPSimdDirective &s)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
mlir::LogicalResult emitDoStmt(const clang::DoStmt &s)
mlir::LogicalResult emitOMPCriticalDirective(const OMPCriticalDirective &s)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
mlir::LogicalResult emitOpenACCDataConstruct(const OpenACCDataConstruct &s)
mlir::LogicalResult emitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &s)
mlir::LogicalResult emitOMPParallelMasterDirective(const OMPParallelMasterDirective &s)
mlir::LogicalResult emitOpenACCWaitConstruct(const OpenACCWaitConstruct &s)
mlir::LogicalResult emitOMPCancellationPointDirective(const OMPCancellationPointDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &s)
mlir::LogicalResult emitOMPReverseDirective(const OMPReverseDirective &s)
const clang::LangOptions & getLangOpts() const
mlir::LogicalResult emitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &s)
mlir::LogicalResult emitOMPTileDirective(const OMPTileDirective &s)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
mlir::LogicalResult emitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &s)
mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &s)
mlir::LogicalResult emitOMPTargetParallelDirective(const OMPTargetParallelDirective &s)
mlir::LogicalResult emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s)
mlir::LogicalResult emitOMPTargetDirective(const OMPTargetDirective &s)
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef< const Attr * > attrs)
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
mlir::LogicalResult emitOMPScopeDirective(const OMPScopeDirective &s)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::LogicalResult emitOMPDepobjDirective(const OMPDepobjDirective &s)
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &s)
mlir::LogicalResult emitOpenACCInitConstruct(const OpenACCInitConstruct &s)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
mlir::LogicalResult emitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPUnrollDirective(const OMPUnrollDirective &s)
mlir::LogicalResult emitOMPTaskDirective(const OMPTaskDirective &s)
mlir::LogicalResult emitOpenACCSetConstruct(const OpenACCSetConstruct &s)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
mlir::LogicalResult emitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &s)
mlir::LogicalResult emitOMPCanonicalLoop(const OMPCanonicalLoop &s)
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s)
mlir::LogicalResult emitOMPTeamsDirective(const OMPTeamsDirective &s)
mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &s, mlir::Type condType, bool buildingTopLevelCase)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
mlir::LogicalResult emitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &s)
mlir::LogicalResult emitOMPFuseDirective(const OMPFuseDirective &s)
mlir::LogicalResult emitSimpleStmt(const clang::Stmt *s, bool useCurrentScope)
mlir::LogicalResult emitOMPSectionDirective(const OMPSectionDirective &s)
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s)
mlir::LogicalResult emitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &s)
mlir::LogicalResult emitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &s)
mlir::LogicalResult emitOpenACCComputeConstruct(const OpenACCComputeConstruct &s)
mlir::LogicalResult emitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &s)
mlir::LogicalResult emitSwitchBody(const clang::Stmt *s)
mlir::LogicalResult emitForStmt(const clang::ForStmt &s)
mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &s)
mlir::LogicalResult emitOMPFlushDirective(const OMPFlushDirective &s)
mlir::LogicalResult emitOMPGenericLoopDirective(const OMPGenericLoopDirective &s)
mlir::LogicalResult emitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &s)
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
mlir::LogicalResult emitOMPOrderedDirective(const OMPOrderedDirective &s)
mlir::LogicalResult emitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &s)
mlir::LogicalResult emitOMPInterchangeDirective(const OMPInterchangeDirective &s)
mlir::LogicalResult emitOMPDispatchDirective(const OMPDispatchDirective &s)
mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &s)
mlir::LogicalResult emitAttributedStmt(const AttributedStmt &s)
mlir::LogicalResult emitOMPForSimdDirective(const OMPForSimdDirective &s)
mlir::LogicalResult emitOMPTaskLoopDirective(const OMPTaskLoopDirective &s)
Address returnValue
The temporary alloca to hold the return value.
mlir::LogicalResult emitOMPTargetDataDirective(const OMPTargetDataDirective &s)
mlir::LogicalResult emitLabel(const clang::LabelDecl &d)
mlir::LogicalResult emitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &s)
static bool hasAggregateEvaluationKind(clang::QualType type)
mlir::LogicalResult emitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &s)
mlir::LogicalResult emitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPAtomicDirective(const OMPAtomicDirective &s)
mlir::LogicalResult emitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &s)
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s)
mlir::LogicalResult emitIndirectGotoStmt(const IndirectGotoStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTaskgroupDirective(const OMPTaskgroupDirective &s)
mlir::LogicalResult emitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &s)
mlir::LogicalResult emitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &s)
void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty)
mlir::LogicalResult emitOMPInteropDirective(const OMPInteropDirective &s)
mlir::LogicalResult emitOMPErrorDirective(const OMPErrorDirective &s)
mlir::LogicalResult emitOMPSingleDirective(const OMPSingleDirective &s)
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s)
mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &s)
mlir::LogicalResult emitOMPScanDirective(const OMPScanDirective &s)
llvm::SmallVector< mlir::Type, 2 > condTypeStack
The type of the condition for the emitting switch statement.
mlir::LogicalResult emitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &s)
void emitStopPoint(const Stmt *s)
Build a debug stoppoint if we are emitting debug info.
mlir::LogicalResult emitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &s)
mlir::LogicalResult emitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &s)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s)
mlir::LogicalResult emitOMPForDirective(const OMPForDirective &s)
mlir::LogicalResult emitOMPMasterDirective(const OMPMasterDirective &s)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &s, bool buildingTopLevelCase)
mlir::LogicalResult emitOMPMetaDirective(const OMPMetaDirective &s)
mlir::LogicalResult emitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &s)
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl=false)
mlir::LogicalResult emitOMPParallelGenericLoopDirective(const OMPParallelGenericLoopDirective &s)
mlir::LogicalResult emitOMPMaskedDirective(const OMPMaskedDirective &s)
llvm::DenseMap< const VarDecl *, mlir::Value > nrvoFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
mlir::LogicalResult emitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &s)
mlir::LogicalResult emitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &s)
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &s)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
mlir::LogicalResult emitOMPParallelForDirective(const OMPParallelForDirective &s)
mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType, mlir::ArrayAttr value, cir::CaseOpKind kind, bool buildingTopLevelCase)
mlir::LogicalResult emitOMPSectionsDirective(const OMPSectionsDirective &s)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
mlir::LogicalResult emitOMPDistributeDirective(const OMPDistributeDirective &s)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
mlir::LogicalResult emitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &s)
mlir::LogicalResult emitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &s)
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s)
mlir::LogicalResult emitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &s)
mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &s, mlir::Type condType, bool buildingTopLevelCase)
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s)
mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s)
mlir::LogicalResult emitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &s)
clang::ASTContext & getContext() const
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
mlir::LogicalResult emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s)
mlir::LogicalResult emitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &s)
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
mlir::LogicalResult emitOMPCancelDirective(const OMPCancelDirective &s)
mlir::LogicalResult emitOMPStripeDirective(const OMPStripeDirective &s)
mlir::LogicalResult emitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &s)
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
mlir::LogicalResult emitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &s)
mlir::LogicalResult emitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &s)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::LogicalResult emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s)
mlir::LogicalResult emitOMPTargetSimdDirective(const OMPTargetSimdDirective &s)
mlir::LogicalResult emitOMPAssumeDirective(const OMPAssumeDirective &s)
mlir::LogicalResult emitOpenACCLoopConstruct(const OpenACCLoopConstruct &s)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Address getAggregateAddress() const
Return the value of the address of the aggregate.
Definition CIRGenValue.h:69
bool isAggregate() const
Definition CIRGenValue.h:51
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
CaseStmt - Represent a case statement.
Definition Stmt.h:1912
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1732
ContinueStmt - This represents a continue.
Definition Stmt.h:3111
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1623
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2824
This represents one expression.
Definition Expr.h:112
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3670
QualType getType() const
Definition Expr.h:144
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2880
GotoStmt - This represents a direct goto.
Definition Stmt.h:2961
IfStmt - This represents an if/then/else.
Definition Stmt.h:2251
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3000
Represents the declaration of a label.
Definition Decl.h:524
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.h:554
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2138
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A (possibly-)qualified type.
Definition TypeBase.h:937
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3152
Stmt - This represents one statement.
Definition Stmt.h:86
@ NoStmtClass
Definition Stmt.h:89
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2501
bool isVoidType() const
Definition TypeBase.h:8991
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2689
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
const internal::VariadicDynCastAllOfMatcher< Stmt, CompoundStmt > compoundStmt
Matches compound statements.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::VariadicDynCastAllOfMatcher< Stmt, SwitchCase > switchCase
Matches case and default statements inside switch statements.
const internal::VariadicAllOfMatcher< Stmt > stmt
Matches statements.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1746
static bool aggValueSlotGC()
static bool loopInfoStack()
static bool emitCondLikelihoodViaExpectIntrinsic()
static bool constantFoldSwitchStatement()
static bool insertBuiltinUnpredictable()
static bool requiresCleanups()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...